This article collects typical usage examples of the Python method tensorflow.variable_scope. If you have been wondering how exactly tensorflow.variable_scope is used, or what it is for, the curated code examples below may help. You can also explore further usage examples of the tensorflow module in which this method is defined.
The sections below present 15 code examples of the tensorflow.variable_scope method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
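Before the examples, here is a minimal sketch of the variable-sharing behavior tf.variable_scope exists for (the scope and variable names below are illustrative; this assumes TensorFlow 1.x, where the method lives at the top level of the package):

import tensorflow as tf

# The first entry into the scope creates the variable "model/w".
with tf.variable_scope("model"):
    w = tf.get_variable("w", shape=[10, 10])

# Re-entering the scope with reuse=True returns the existing variable
# instead of creating a new one (or raising a ValueError).
with tf.variable_scope("model", reuse=True):
    w_again = tf.get_variable("w", shape=[10, 10])

assert w is w_again  # both names refer to the same variable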
Example 1: stackedRNN
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def stackedRNN(self, x, dropout, scope, embedding_size, sequence_length, hidden_units):
    n_hidden = hidden_units
    n_layers = 3
    # Prepare data shape to match `static_rnn` function requirements.
    x = tf.unstack(tf.transpose(x, perm=[1, 0, 2]))
    # Define stacked LSTM cells (forward direction) with TensorFlow.
    with tf.name_scope("fw" + scope), tf.variable_scope("fw" + scope):
        stacked_rnn_fw = []
        for _ in range(n_layers):
            fw_cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden, forget_bias=1.0, state_is_tuple=True)
            lstm_fw_cell = tf.contrib.rnn.DropoutWrapper(fw_cell, output_keep_prob=dropout)
            stacked_rnn_fw.append(lstm_fw_cell)
        lstm_fw_cell_m = tf.nn.rnn_cell.MultiRNNCell(cells=stacked_rnn_fw, state_is_tuple=True)
        outputs, _ = tf.nn.static_rnn(lstm_fw_cell_m, x, dtype=tf.float32)
    return outputs[-1]
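Example 1 opens both a tf.name_scope and a tf.variable_scope with the same prefix. The two are not interchangeable: variable_scope prefixes names created by tf.get_variable, while name_scope only prefixes operation names and is ignored by tf.get_variable, so opening both keeps the ops and the weights under matching prefixes. A minimal sketch of the difference (illustrative scope names; assumes TensorFlow 1.x):

import tensorflow as tf

with tf.name_scope("ops"):
    with tf.variable_scope("weights"):
        v = tf.get_variable("v", shape=[1])
        c = tf.constant(1.0, name="c")

print(v.name)  # weights/v:0 -- the surrounding name_scope is ignored
print(c.name)  # ops/weights/c:0 -- both scopes apply to the op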
Example 2: wrap_variable
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def wrap_variable(self, var):
    """Wrap layer.w into variables."""
    val = self.lay.w.get(var, None)
    if val is None:
        shape = self.lay.wshape[var]
        args = [0., 1e-2, shape]
        if 'moving_mean' in var:
            val = np.zeros(shape)
        elif 'moving_variance' in var:
            val = np.ones(shape)
        else:
            val = np.random.normal(*args)
        self.lay.w[var] = val.astype(np.float32)

    self.act = 'Init '
    if not self.var:
        return

    val = self.lay.w[var]
    self.lay.w[var] = tf.constant_initializer(val)
    if var in self._SLIM:
        return
    with tf.variable_scope(self.scope):
        self.lay.w[var] = tf.get_variable(var,
                                          shape=self.lay.wshape[var],
                                          dtype=tf.float32,
                                          initializer=self.lay.w[var])
Example 3: normalize
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def normalize(inputs,
              scope="normalize",
              reuse=None):
    '''Applies layer normalization along the last axis.

    Args:
      inputs: A tensor with 2 or more dimensions, where the first dimension is
        `batch_size`. The normalization is over the last dimension.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A tensor with the same shape and dtype as `inputs`.
    '''
    outputs = tf.contrib.layers.layer_norm(inputs,
                                           begin_norm_axis=-1,
                                           scope=scope,
                                           reuse=reuse)
    return outputs
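A possible call site for the wrapper above (made-up shapes; assumes TensorFlow 1.x with tf.contrib available). Passing reuse=True on a second call with the same scope name shares the gain and offset parameters created on the first call:

import tensorflow as tf

inputs = tf.placeholder(tf.float32, shape=[None, 20, 512])
normalized = normalize(inputs, scope="ln_1")
normalized_again = normalize(inputs, scope="ln_1", reuse=True)  # shared params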
Example 4: highwaynet
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def highwaynet(inputs, num_units=None, scope="highwaynet", reuse=None):
    '''Highway networks, see https://arxiv.org/abs/1505.00387

    Args:
      inputs: A 3D tensor of shape [N, T, W].
      num_units: An int or `None`. The number of units in the highway layer;
        defaults to the input size when `None`.
      scope: Optional scope for `variable_scope`.
      reuse: Boolean, whether to reuse the weights of a previous layer
        by the same name.

    Returns:
      A 3D tensor of shape [N, T, W].
    '''
    if not num_units:
        num_units = inputs.get_shape()[-1]
    with tf.variable_scope(scope, reuse=reuse):
        H = tf.layers.dense(inputs, units=num_units, activation=tf.nn.relu, name="dense1")
        T = tf.layers.dense(inputs, units=num_units, activation=tf.nn.sigmoid,
                            bias_initializer=tf.constant_initializer(-1.0), name="dense2")
        outputs = H * T + inputs * (1. - T)
    return outputs
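A usage sketch (hypothetical shapes; assumes TensorFlow 1.x). Because the transform gate output is combined residually with the input, leaving num_units at None keeps the last dimension unchanged, and distinct scope names give each layer its own weights:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[32, 100, 256])  # [N, T, W]
h = highwaynet(x, scope="highwaynet_1")
h = highwaynet(h, scope="highwaynet_2")  # separate weights per scope
print(h.get_shape())  # (32, 100, 256)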
Example 5: block35
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_1, tower_conv2_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Example 6: block17
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 17x17 resnet block."""
    with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
                                        scope='Conv2d_0b_1x7')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
                                        scope='Conv2d_0c_7x1')
        mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
Example 7: block8
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 8x8 resnet block."""
    with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
                                        scope='Conv2d_0b_1x3')
            tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
                                        scope='Conv2d_0c_3x1')
        mixed = tf.concat(axis=3, values=[tower_conv, tower_conv1_2])
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
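Examples 5 through 7 all use the three-argument form tf.variable_scope(scope, 'BlockNN', [net], reuse=reuse). The second argument is default_name: it is used, and uniquified, whenever scope is None, which lets a caller stack many blocks without naming each one. A minimal sketch of that signature (illustrative names; assumes TensorFlow 1.x):

import tensorflow as tf

def tiny_block(net, scope=None, reuse=None):
    # default_name='TinyBlock' applies when scope is None and is
    # uniquified on repeated calls: TinyBlock, TinyBlock_1, ...
    with tf.variable_scope(scope, 'TinyBlock', [net], reuse=reuse) as sc:
        print(sc.name)
        return net

x = tf.zeros([1, 35, 35, 320])
tiny_block(x)                   # prints: TinyBlock
tiny_block(x)                   # prints: TinyBlock_1
tiny_block(x, scope='Block_A')  # prints: Block_A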
Example 8: fprop
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def fprop(self, x, **kwargs):
    del kwargs
    my_conv = functools.partial(tf.layers.conv2d,
                                kernel_size=3,
                                strides=2,
                                padding='valid',
                                activation=tf.nn.relu,
                                kernel_initializer=HeReLuNormalInitializer)
    my_dense = functools.partial(
        tf.layers.dense, kernel_initializer=HeReLuNormalInitializer)
    with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
        for depth in [96, 256, 384, 384, 256]:
            x = my_conv(x, depth)
        y = tf.layers.flatten(x)
        y = my_dense(y, 4096, tf.nn.relu)
        y = fc7 = my_dense(y, 4096, tf.nn.relu)
        y = my_dense(y, 1000)
    return {'fc7': fc7,
            self.O_LOGITS: y,
            self.O_PROBS: tf.nn.softmax(logits=y)}
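reuse=tf.AUTO_REUSE (added in TF 1.4) makes the scope create variables on the first call and silently reuse them afterwards, so fprop can be called on several input tensors while building only one set of weights. A minimal sketch of the pattern (hypothetical model; assumes TensorFlow 1.x):

import tensorflow as tf

def logits(x):
    with tf.variable_scope("net", reuse=tf.AUTO_REUSE):
        return tf.layers.dense(x, 10, name="out")

y1 = logits(tf.zeros([4, 32]))  # creates net/out/kernel and net/out/bias
y2 = logits(tf.zeros([4, 32]))  # reuses the same two variables
print(len(tf.trainable_variables()))  # 2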
Example 9: setUp
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def setUp(self):
    super(TestRunnerMultiGPU, self).setUp()
    self.sess = tf.Session()
    inputs = []
    outputs = []
    self.niter = 10
    niter = self.niter
    # A simple graph with `niter` sub-graphs.
    with tf.variable_scope(None, 'runner'):
        for i in range(niter):
            v = tf.get_variable('v%d' % i, shape=(100, 10))
            w = tf.get_variable('w%d' % i, shape=(100, 1))
            inputs += [{'v': v, 'w': w}]
            outputs += [{'v': v, 'w': w}]
    self.runner = RunnerMultiGPU(inputs, outputs, sess=self.sess)
Example 10: preprocess_batch
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def preprocess_batch(images_batch, preproc_func=None):
    """
    Creates a preprocessing graph for a batch, given a function that processes
    a single image.

    :param images_batch: A tensor for an image batch.
    :param preproc_func: (optional function) A function that takes in a
        tensor and returns a preprocessed input.
    """
    if preproc_func is None:
        return images_batch

    with tf.variable_scope('preprocess'):
        images_list = tf.split(images_batch, int(images_batch.shape[0]))
        result_list = []
        for img in images_list:
            reshaped_img = tf.reshape(img, img.shape[1:])
            processed_img = preproc_func(reshaped_img)
            result_list.append(tf.expand_dims(processed_img, axis=0))
        result_images = tf.concat(result_list, axis=0)
    return result_images
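A usage sketch with a hypothetical per-image function (made-up shapes; assumes TensorFlow 1.x). Note that tf.split needs a statically known batch size:

import tensorflow as tf

def my_preproc(img):
    # Hypothetical preprocessing: rescale pixels from [0, 1] to [-1, 1].
    return img * 2.0 - 1.0

images = tf.placeholder(tf.float32, shape=[8, 28, 28, 1])  # static batch dim
processed = preprocess_batch(images, my_preproc)
print(processed.get_shape())  # (8, 28, 28, 1)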
Example 11: set_input_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def set_input_shape(self, input_shape):
    batch_size, rows, cols, input_channels = input_shape
    kernel_shape = tuple(self.kernel_shape) + (input_channels,
                                               self.output_channels)
    assert len(kernel_shape) == 4
    assert all(isinstance(e, int) for e in kernel_shape), kernel_shape
    with tf.variable_scope(self.name):
        init = tf.truncated_normal(kernel_shape, stddev=0.1)
        self.kernels = self.get_variable(self.w_name, init)
        self.b = self.get_variable(
            'b', .1 + np.zeros((self.output_channels,)).astype('float32'))
    input_shape = list(input_shape)
    self.input_shape = input_shape
    input_shape[0] = 1
    dummy_batch = tf.zeros(input_shape)
    dummy_output = self.fprop(dummy_batch)
    output_shape = [int(e) for e in dummy_output.get_shape()]
    output_shape[0] = 1
    self.output_shape = tuple(output_shape)
Example 12: build_cost
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def build_cost(self, labels, logits):
    """
    Build the graph for the cost from the logits, if logits are provided.
    If predictions are provided instead, the logits are extracted from the
    softmax operation that produced them.
    """
    op = logits.op
    if "softmax" in str(op).lower():
        logits, = op.inputs

    with tf.variable_scope('costs'):
        xent = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=labels)
        cost = tf.reduce_mean(xent, name='xent')
        cost += self._decay()

    return cost
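The unwrapping at the top relies on the fact that a softmax op's input is the original logits tensor, so the cost can still be computed in a numerically stable way when the caller passed probabilities. A minimal sketch of that recovery step (assumes TensorFlow 1.x):

import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
probs = tf.nn.softmax(logits)
recovered, = probs.op.inputs  # the softmax op's single input
print(recovered is logits)    # True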
Example 13: run_eval
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def run_eval(sess, test_X, test_y):
    ds = tf.data.Dataset.from_tensor_slices((test_X, test_y))
    ds = ds.batch(1)
    X, y = ds.make_one_shot_iterator().get_next()

    # Reuse the variables created by the training graph under "model".
    with tf.variable_scope("model", reuse=True):
        prediction, _, _ = lstm_model(X, [0.0], False)

    predictions = []
    labels = []
    for i in range(TESTING_EXAMPLES):
        p, l = sess.run([prediction, y])
        predictions.append(p)
        labels.append(l)

    predictions = np.array(predictions).squeeze()
    labels = np.array(labels).squeeze()
    rmse = np.sqrt(((predictions - labels) ** 2).mean(axis=0))
    print("Root mean squared error is: %f" % rmse)

    plt.figure()
    plt.plot(predictions, label='predictions')
    plt.plot(labels, label='real_sin')
    plt.legend()
    plt.show()
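The reuse=True call above assumes the training code already created the variables under the "model" scope; the evaluation graph then binds to the same weights instead of initializing new ones. A minimal sketch of that train/eval pattern (hypothetical model function; assumes TensorFlow 1.x):

import tensorflow as tf

def tiny_model(x):
    return tf.layers.dense(x, 1, name="out")

with tf.variable_scope("model"):              # creates model/out/*
    y_train = tiny_model(tf.zeros([16, 4]))
with tf.variable_scope("model", reuse=True):  # shares model/out/*
    y_eval = tiny_model(tf.zeros([1, 4]))

print(len(tf.trainable_variables()))  # 2: one kernel, one bias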
Example 14: build_permutation
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def build_permutation(self):
    with tf.variable_scope("encoder"):
        with tf.variable_scope("embedding"):
            # Embed the input sequence (+2 for the TW features here too).
            W_embed = tf.get_variable("weights",
                                      [1, self.input_dimension + 2, self.input_embed],
                                      initializer=self.initializer)
            embedded_input = tf.nn.conv1d(self.input_, W_embed, 1, "VALID",
                                          name="embedded_input")
            # Batch normalization.
            embedded_input = tf.layers.batch_normalization(embedded_input, axis=2,
                                                           training=self.is_training,
                                                           name='layer_norm', reuse=None)
        with tf.variable_scope("dynamic_rnn"):
            # Encode the input sequence. Alternatives:
            # BNLSTMCell(self.num_neurons, self.training) or
            # cell1 = DropoutWrapper(cell1, output_keep_prob=0.9)
            cell1 = LSTMCell(self.num_neurons, initializer=self.initializer)
            # Returns the output activations [batch_size, seq_length, num_neurons]
            # and the last hidden state as tensors.
            encoder_output, encoder_state = tf.nn.dynamic_rnn(cell1, embedded_input,
                                                              dtype=tf.float32)

    with tf.variable_scope('decoder'):
        # The pointer network returns permutations (self.positions) together
        # with their log-probability for backprop.
        self.ptr = Pointer_decoder(encoder_output, self.config)
        self.positions, self.log_softmax, self.attending, self.pointing = self.ptr.loop_decode(encoder_state)
        variable_summaries('log_softmax', self.log_softmax, with_max_min=True)
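Nested variable_scope calls compose hierarchical names, so the embedding weights above end up at encoder/embedding/weights. A sketch (assumes TensorFlow 1.x):

import tensorflow as tf

with tf.variable_scope("encoder"):
    with tf.variable_scope("embedding"):
        w = tf.get_variable("weights", shape=[1, 4, 8])
print(w.name)  # encoder/embedding/weights:0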
Example 15: feedforward
# Required import: import tensorflow [as alias]
# Or: from tensorflow import variable_scope [as alias]
def feedforward(inputs, num_units=[2048, 512], is_training=True):
    with tf.variable_scope("ffn", reuse=None):
        # Inner layer
        params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
                  "activation": tf.nn.relu, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        # Readout layer
        params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1,
                  "activation": None, "use_bias": True}
        outputs = tf.layers.conv1d(**params)
        # Residual connection
        outputs += inputs
        # Normalize; shape: [batch_size, seq_length, n_hidden]
        outputs = tf.layers.batch_normalization(outputs, axis=2,
                                                training=is_training,
                                                name='ln', reuse=None)
    return outputs
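A usage sketch (made-up shapes; assumes TensorFlow 1.x). Because the scope name "ffn" and reuse=None are hard-coded, a second call in the same graph will typically fail with a ValueError about ffn/... variables already existing; callers that need several of these layers would have to parameterize the scope or the reuse flag:

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[32, 10, 512])
out = feedforward(x, num_units=[2048, 512])
print(out.get_shape())  # (32, 10, 512) -- the residual keeps the shape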