本文整理汇总了Python中tensorflow.variable_op_scope方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow.variable_op_scope方法的具体用法?Python tensorflow.variable_op_scope怎么用?Python tensorflow.variable_op_scope使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow
的用法示例。
在下文中一共展示了tensorflow.variable_op_scope方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: join
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import variable_op_scope [as 别名]
def join(columns,
         coin):
    """Join the columns of a fractal block by taking their mean.

    Drop path is applied first when `tflearn.get_training_mode()` is True.

    Args:
        columns: list of tensors, the output columns of a fractal block.
        coin: boolean tensor. Determines whether drop path is
            local or global (consumed by `drop_path`).

    Returns:
        The single column unchanged if only one is given; otherwise the
        element-wise mean over the (possibly path-dropped) columns.
    """
    # Nothing to join (or drop) when there is a single column.
    if len(columns) == 1:
        return columns[0]
    with tf.variable_op_scope(columns, None, "Join"):
        columns = tf.convert_to_tensor(columns)
        # Only apply drop path while training; pass through at inference.
        columns = tf.cond(tflearn.get_training_mode(),
                          lambda: drop_path(columns, coin),
                          lambda: columns)
        out = tf.reduce_mean(columns, 0)
        return out
示例2: repeat_op
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import variable_op_scope [as 别名]
def repeat_op(repetitions, inputs, op, *args, **kwargs):
    """Build a sequential tower by applying `op` to `inputs` repeatedly.

    A fresh sub-scope is created for each application by the op's own
    scoping, so e.g. repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
    produces variables under:
        conv1/Conv
        conv1/Conv_1
        conv1/Conv_2

    Args:
        repetitions: number of times to apply `op`.
        inputs: a tensor of size [batch_size, height, width, channels].
        op: an operation.
        *args: positional arguments forwarded to `op`.
        **kwargs: keyword arguments forwarded to `op`; an optional
            'scope' entry names the enclosing variable scope.

    Returns:
        The tensor resulting from applying `op` `repetitions` times.

    Raises:
        ValueError: if the op is unknown or wrong.
    """
    # Pull the scope out so it is not forwarded to `op` itself.
    outer_scope = kwargs.pop('scope', None)
    with tf.variable_op_scope([inputs], outer_scope, 'RepeatOp'):
        net = inputs
        for _ in range(repetitions):
            net = op(net, *args, **kwargs)
        return net
示例3: coin_flip
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import variable_op_scope [as 别名]
def coin_flip(prob=.5):
    """Sample a random boolean tensor that is True with chance `prob`.

    Used to choose between local and global drop path.

    Args:
        prob: float, probability of the result being True.
    """
    with tf.variable_op_scope([], None, "CoinFlip"):
        # Draw a single uniform sample and threshold it against `prob`.
        sample = tf.random_uniform([1])[0]
        return sample > prob
示例4: drop_path
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import variable_op_scope [as 别名]
def drop_path(columns,
              coin):
    """Apply drop path to `columns`, globally or locally depending on `coin`.

    Args:
        columns: tensor of fractal-block columns.
        coin: boolean tensor; True selects `drop_some` (local drop),
            False selects `random_column` (global drop).
    """
    with tf.variable_op_scope([columns], None, "DropPath"):
        return tf.cond(coin,
                       lambda: drop_some(columns),
                       lambda: random_column(columns))
示例5: policy
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import variable_op_scope [as 别名]
def policy(obs, theta, name='policy'):
    """Deterministic policy network: two ReLU hidden layers, tanh output.

    Args:
        obs: observation tensor, the network input.
        theta: sequence of parameters [W1, b1, W2, b2, W3, b3].
        name: scope name for the created ops.

    Returns:
        The action tensor, squashed into (-1, 1) by tanh.
    """
    with tf.variable_op_scope([obs], name, name):
        layer_in = tf.identity(obs, name='h0-obs')
        hidden1 = tf.nn.relu(tf.matmul(layer_in, theta[0]) + theta[1], name='h1')
        hidden2 = tf.nn.relu(tf.matmul(hidden1, theta[2]) + theta[3], name='h2')
        logits = tf.identity(tf.matmul(hidden2, theta[4]) + theta[5], name='h3')
        return tf.nn.tanh(logits, name='h4-action')
示例6: qfunction
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import variable_op_scope [as 别名]
def qfunction(obs, act, theta, name="qfunction"):
    """Q-value network: action is injected after the first hidden layer.

    Args:
        obs: observation tensor.
        act: action tensor, concatenated with the first hidden layer.
        theta: sequence of parameters [W1, b1, W2, b2, W3, b3].
        name: scope name for the created ops.

    Returns:
        A rank-reduced tensor of scalar Q-values (last unit axis squeezed).
    """
    with tf.variable_op_scope([obs, act], name, name):
        obs_in = tf.identity(obs, name='h0-obs')
        act_in = tf.identity(act, name='h0-act')
        hidden1 = tf.nn.relu(tf.matmul(obs_in, theta[0]) + theta[1], name='h1')
        # Legacy TF concat signature: axis first. Join hidden layer with action.
        hidden1_act = tf.concat(1, [hidden1, act])
        hidden2 = tf.nn.relu(tf.matmul(hidden1_act, theta[2]) + theta[3], name='h2')
        q_values = tf.matmul(hidden2, theta[4]) + theta[5]
        return tf.squeeze(q_values, [1], name='h3-q')
示例7: fractal_template
# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import variable_op_scope [as 别名]
def fractal_template(inputs,
                     num_columns,
                     block_fn,
                     block_asc,
                     joined=True,
                     is_training=True,
                     reuse=False,
                     scope=None):
    """Template for making fractal blocks.

    Given a function and a corresponding arg_scope, `fractal_template`
    builds a truncated fractal with `num_columns` columns.

    Args:
        inputs: a 4-D tensor `[batch_size, height, width, channels]`.
        num_columns: integer, the columns in the fractal.
        block_fn: function to be called within each fractal.
        block_asc: a function that returns an arg_scope for `block_fn`.
        joined: boolean, whether the output columns should be joined.
        is_training: accepted for interface compatibility; not read in
            this function (training mode is taken from
            `tflearn.get_training_mode()` inside `join`).
        reuse: whether or not the layer and its variables should be
            reused. To be able to reuse, the layer scope must be given.
        scope: Optional scope for `variable_scope`.
    """
    def fractal_expand(inputs, num_columns, joined):
        """Recursive helper: build one level of the fractal."""
        with block_asc():
            output = lambda cols: join(cols, coin) if joined else cols
            # Base case: a single column is just one application of block_fn.
            if num_columns == 1:
                return output([block_fn(inputs)])
            # Left branch: one block; right branch: two stacked sub-fractals.
            left = block_fn(inputs)
            right = fractal_expand(inputs, num_columns - 1, joined=True)
            right = fractal_expand(right, num_columns - 1, joined=False)
            cols = [left] + right
            return output(cols)

    with tf.variable_op_scope([inputs], scope, 'Fractal',
                              reuse=reuse) as scope:
        # One coin per fractal decides local vs. global drop path.
        coin = coin_flip()
        net = fractal_expand(inputs, num_columns, joined)
    return net