This article collects typical usage examples of the Python method baselines.a2c.utils.conv. If you are wondering how utils.conv is called in practice and what real uses of it look like, the selected code examples below should help. You can also explore further usage examples from the containing module, baselines.a2c.utils.
The following presents 10 code examples of the utils.conv method, sorted by popularity by default.
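For orientation, most of the examples below call conv with a 4-D image tensor, a variable-scope name, and the keyword arguments nf (number of filters), rf (kernel size), stride, and init_scale. A minimal standalone sketch of such a call, assuming TF1 graph mode and an illustrative 84x84x4 observation shape:

# Minimal sketch of calling baselines.a2c.utils.conv directly (assumes TF1-era baselines).
import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv

X = tf.placeholder(tf.uint8, [None, 84, 84, 4])      # stacked uint8 frames; shape is illustrative
x = tf.cast(X, tf.float32) / 255.                    # scale pixels to [0, 1]
h = tf.nn.relu(conv(x, 'c1', nf=32, rf=8, stride=4,  # 32 filters, 8x8 kernel, stride 4
                    init_scale=np.sqrt(2)))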
Example 1: nature_cnn
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def nature_cnn(unscaled_images):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2)))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2)))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2)))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
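A short usage sketch for the nature_cnn above, assuming TF1 graph mode, the imports from the earlier sketch, and the fc and conv_to_fc helpers that nature_cnn itself uses; the 84x84x4 shape is the usual Atari frame stack and is only illustrative:

# Hypothetical usage of the nature_cnn defined above.
obs = tf.placeholder(tf.uint8, [None, 84, 84, 4])    # unscaled uint8 observations
features = nature_cnn(obs)                           # -> [batch, 512] feature tensor
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(features, {obs: np.zeros((1, 84, 84, 4), dtype=np.uint8)})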
Example 2: nature_cnn
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def nature_cnn(unscaled_images, **conv_kwargs):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2),
**conv_kwargs))
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
h3 = conv_to_fc(h3)
return activ(fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2)))
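The only difference from Example 1 is that extra keyword arguments are forwarded to every conv call. A hedged sketch of what that enables, assuming the conv helper in your copy of baselines.a2c.utils accepts a pad keyword (true of some releases, but worth verifying):

# Hypothetical: forwarding a padding option through **conv_kwargs.
obs = tf.placeholder(tf.uint8, [None, 84, 84, 4])
features = nature_cnn(obs, pad='SAME')   # every conv layer receives pad='SAME'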
Example 3: cnn_small
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def cnn_small(**conv_kwargs):
def network_fn(X):
h = tf.cast(X, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(h, 'c1', nf=8, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs))
h = activ(conv(h, 'c2', nf=16, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h = conv_to_fc(h)
h = activ(fc(h, 'fc1', nh=128, init_scale=np.sqrt(2)))
return h, None
return network_fn
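cnn_small follows the baselines network-builder convention: calling it returns a network_fn, and network_fn(X) returns the feature tensor together with a recurrent-state value (None here, since the network is feed-forward). A minimal usage sketch with an illustrative observation shape:

# Hypothetical usage of the cnn_small builder above.
network_fn = cnn_small()
obs = tf.placeholder(tf.uint8, [None, 84, 84, 4])
h, recurrent_state = network_fn(obs)     # h: [batch, 128] features, recurrent_state: None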
Example 4: conv_only
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs):
'''
convolutions-only net
Parameters:
----------
convs: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer
'''
def network_fn(X):
out = tf.cast(X, tf.float32) / 255.
with tf.variable_scope("convnet"):
for num_outputs, kernel_size, stride in convs:
out = layers.convolution2d(out,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
activation_fn=tf.nn.relu,
**conv_kwargs)
return out, None
return network_fn
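A brief sketch of building a conv_only network with a custom layer specification (the filter counts, kernel sizes, and strides below are illustrative):

# Hypothetical usage of the conv_only builder above.
network_fn = conv_only(convs=[(16, 8, 4), (32, 4, 2)])   # two conv layers instead of the default three
obs = tf.placeholder(tf.uint8, [None, 84, 84, 4])
conv_out, _ = network_fn(obs)                            # 4-D output of the last convolutional layer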
Example 5: cnn_small
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def cnn_small(**conv_kwargs):
def network_fn(X):
h = tf.cast(X, tf.float32) / 255.
activ = tf.nn.relu
h = activ(conv(h, 'c1', nf=8, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs))
h = activ(conv(h, 'c2', nf=16, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
h = conv_to_fc(h)
h = activ(fc(h, 'fc1', nh=128, init_scale=np.sqrt(2)))
return h
return network_fn
Example 6: conv_only
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs):
'''
convolutions-only net
Parameters:
----------
convs: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer
'''
def network_fn(X):
out = tf.cast(X, tf.float32) / 255.
with tf.variable_scope("convnet"):
for num_outputs, kernel_size, stride in convs:
out = layers.convolution2d(out,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
activation_fn=tf.nn.relu,
**conv_kwargs)
return out
return network_fn
Example 7: nature_cnn_h3
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def nature_cnn_h3(unscaled_images, first_layer_mode='', trainable=True, conv1_fn=lambda x: x):
"""
CNN from Nature paper.
"""
scaled_images = tf.cast(unscaled_images, tf.float32) / 255.
activ = tf.nn.relu
print("scaled_images: {}".format(scaled_images))
if first_layer_mode == 'Share':
assert False
# input_activations = []
# for start in range(3):
# input_images = scaled_images[..., start:start+2]
# assert input_images.get_shape()[-1] == 2 # Should be a pair of frames.
# h = activ(conv(input_images, 'c1_2_frame_input', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), reuse=start!=0, trainable=trainable))
# input_activations.append(h)
# assert len(input_activations) == 3 # Should have 3 pairs of frames.
# h = (1. / 3.) * tf.add_n(input_activations, name='c1') # Average the activations of the three pairs of frames.
elif first_layer_mode == '2Frame':
input_images = scaled_images[..., -2:]
assert input_images.get_shape()[-1] == 2 # Should be a pair of frames.
h = activ(conv(input_images, 'c1_2_frame_input', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), trainable=trainable))
else:
assert False
# scaled_images = scaled_images[..., -2:]
# print("scaled_images: {}".format(scaled_images))
# h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), trainable=trainable))
# print('scaled_images: {}'.format(scaled_images.get_shape()))
h = conv1_fn(h)
h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), trainable=trainable))
_h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), trainable=trainable)
h3 = activ(_h3)
return h3, _h3
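Note that in this variant only the '2Frame' branch is live (the 'Share' and default branches assert False): the first conv layer sees just the last two stacked frames, and the function returns both the activated and pre-activation outputs of the third conv layer. A hedged usage sketch with an illustrative 4-frame stack:

# Hypothetical usage of nature_cnn_h3; only first_layer_mode='2Frame' avoids the assert False branches.
obs = tf.placeholder(tf.uint8, [None, 84, 84, 4])              # 4 stacked frames; the last two feed 'c1_2_frame_input'
h3, h3_preact = nature_cnn_h3(obs, first_layer_mode='2Frame')  # activated and pre-activation conv3 outputs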
Example 8: conv_only
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def conv_only(convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], **conv_kwargs):
'''
convolutions-only net
Parameters:
----------
convs: list of triples (filter_number, filter_size, stride) specifying parameters for each layer.
Returns:
function that takes tensorflow tensor as input and returns the output of the last convolutional layer
'''
def network_fn(X):
out = tf.cast(X, tf.float32) / 255.
with tf.variable_scope("convnet"):
for num_outputs, kernel_size, stride in convs:
out = tf.contrib.layers.convolution2d(out,
num_outputs=num_outputs,
kernel_size=kernel_size,
stride=stride,
activation_fn=tf.nn.relu,
**conv_kwargs)
return out
return network_fn
Example 9: __init__
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def __init__(self, sess, ob_space, ac_space, nenv, nsteps, nstack, nlstm=256, reuse=False):
nbatch = nenv*nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc*nstack)
nact = ac_space.n
X = tf.placeholder(tf.uint8, ob_shape) #obs
M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
with tf.variable_scope("model", reuse=reuse):
h = conv(tf.cast(X, tf.float32)/255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2 = conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3 = conv_to_fc(h3)
h4 = fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))
xs = batch_to_seq(h4, nenv, nsteps)
ms = batch_to_seq(M, nenv, nsteps)
h5, snew = lnlstm(xs, ms, S, 'lstm1', nh=nlstm)
h5 = seq_to_batch(h5)
pi = fc(h5, 'pi', nact, act=lambda x:x)
vf = fc(h5, 'v', 1, act=lambda x:x)
v0 = vf[:, 0]
a0 = sample(pi)
self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)
def step(ob, state, mask):
a, v, s = sess.run([a0, v0, snew], {X:ob, S:state, M:mask})
return a, v, s
def value(ob, state, mask):
return sess.run(v0, {X:ob, S:state, M:mask})
self.X = X
self.M = M
self.S = S
self.pi = pi
self.vf = vf
self.step = step
self.value = value
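This constructor builds a layer-normalised LSTM policy: the Nature CNN stack feeds an lnlstm over the rollout sequence, with pi and value heads on top and step/value helpers bound to the session. A usage sketch under stated assumptions (the class name LnLstmPolicy is hypothetical, since the snippet does not show it, and the observation/action spaces are illustrative):

# Hypothetical usage; assumes the __init__ above belongs to a class named LnLstmPolicy
# and that the spaces resemble a wrapped Atari environment.
import numpy as np
import gym.spaces
import tensorflow as tf

ob_space = gym.spaces.Box(low=0, high=255, shape=(84, 84, 1), dtype=np.uint8)   # one greyscale frame
ac_space = gym.spaces.Discrete(6)
nenv = 4

sess = tf.Session()
policy = LnLstmPolicy(sess, ob_space, ac_space, nenv=nenv, nsteps=1, nstack=4)  # step model: one step per env
sess.run(tf.global_variables_initializer())

obs = np.zeros((nenv, 84, 84, 4), dtype=np.uint8)        # nstack=4 frames concatenated on the channel axis
state = policy.initial_state                             # zero LSTM state, shape [nenv, 2*nlstm]
masks = np.zeros(nenv, dtype=np.float32)                 # 1.0 where an episode ended at the previous step
actions, values, state = policy.step(obs, state, masks)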
Example 10: __init__
# Required import: from baselines.a2c import utils [as alias]
# Or: from baselines.a2c.utils import conv [as alias]
def __init__(self, sess, ob_space, ac_space, nenv, nsteps, nstack, reuse=False):
nbatch = nenv * nsteps
nh, nw, nc = ob_space.shape
ob_shape = (nbatch, nh, nw, nc * nstack)
nact = ac_space.n
X = tf.placeholder(tf.uint8, ob_shape) # obs
with tf.variable_scope("model", reuse=reuse):
h = conv(tf.cast(X, tf.float32) / 255., 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2))
h2 = conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2))
h3 = conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2))
h3 = conv_to_fc(h3)
h4 = fc(h3, 'fc1', nh=512, init_scale=np.sqrt(2))
pi_logits = fc(h4, 'pi', nact, act=lambda x: x, init_scale=0.01)
pi = tf.nn.softmax(pi_logits)
q = fc(h4, 'q', nact, act=lambda x: x)
a = sample(pi_logits) # could change this to use self.pi instead
self.initial_state = [] # not stateful
self.X = X
self.pi = pi # actual policy params now
self.q = q
def step(ob, *args, **kwargs):
# returns actions, mus, states
a0, pi0 = sess.run([a, pi], {X: ob})
return a0, pi0, [] # dummy state
def out(ob, *args, **kwargs):
pi0, q0 = sess.run([pi, q], {X: ob})
return pi0, q0
def act(ob, *args, **kwargs):
return sess.run(a, {X: ob})
self.step = step
self.out = out
self.act = act
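This policy is feed-forward (initial_state is an empty list) and exposes both pi and q heads, matching the interface ACER expects; step returns the sampled actions, the full action distribution, and a dummy state. A brief sketch in a fresh graph, with the setup mirroring the previous example (the class name CnnPolicy is hypothetical):

# Hypothetical usage in a fresh graph; sess, ob_space and ac_space set up as in the sketch after Example 9.
policy = CnnPolicy(sess, ob_space, ac_space, nenv=4, nsteps=1, nstack=4)
sess.run(tf.global_variables_initializer())
obs = np.zeros((4, 84, 84, 4), dtype=np.uint8)
actions, mus, _ = policy.step(obs)    # sampled actions and per-action probabilities (mus)
pi0, q0 = policy.out(obs)             # policy distribution and Q-values for every action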