This article collects typical usage examples of the Python method tensorflow.python.ops.array_ops.split. If you are wondering what array_ops.split does, how to call it, or what real-world usage looks like, the curated examples here should help. You can also browse further usage examples from its containing module, tensorflow.python.ops.
Below are 15 code examples of the array_ops.split method, sorted by popularity by default.
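Before the examples, a minimal sketch of the split API itself may help (assuming a TF 1.x graph; the tensor names here are hypothetical). array_ops.split can divide a tensor either into a number of equal parts or into explicitly sized pieces:

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.ones([4, 6])
# Three equal [4, 2] pieces along axis 1.
a, b, c = array_ops.split(x, num_or_size_splits=3, axis=1)
# Explicit sizes: a [4, 2] piece and a [4, 4] piece.
d, e = array_ops.split(x, num_or_size_splits=[2, 4], axis=1)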
Example 1: call
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def call(self, inputs, state):
  """Gated recurrent unit (GRU) with nunits cells."""
  with vs.variable_scope("gates"):  # Reset gate and update gate.
    # We start with bias of 1.0 to not reset and not update.
    bias_ones = self._bias_initializer
    if self._bias_initializer is None:
      dtype = [a.dtype for a in [inputs, state]][0]
      bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
    value = math_ops.sigmoid(
        _linear([inputs, state], 2 * self._num_units, True, bias_ones,
                self._kernel_initializer))
    r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
  with vs.variable_scope("candidate"):
    c = self._activation(
        _linear([inputs, r * state], self._num_units, True,
                self._bias_initializer, self._kernel_initializer))
  new_h = u * state + (1 - u) * c
  return new_h, new_h
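The 2-way split above carves the fused gate projection into the reset gate r and the update gate u. A shape-only NumPy sketch (the batch size and num_units are hypothetical):

import numpy as np

value = np.zeros((32, 2 * 128))    # [batch, 2 * num_units] gate pre-activations
r, u = np.split(value, 2, axis=1)  # reset and update gates
assert r.shape == u.shape == (32, 128)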
Example 2: call
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def call(self, inputs, state):
  """LSTM cell with layer normalization and recurrent dropout."""
  c, h = state
  args = array_ops.concat([inputs, h], 1)
  concat = self._linear(args)
  i, j, f, o = array_ops.split(value=concat, num_or_size_splits=4, axis=1)
  if self._layer_norm:
    i = self._norm(i, "input")
    j = self._norm(j, "transform")
    f = self._norm(f, "forget")
    o = self._norm(o, "output")
  g = self._activation(j)
  if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
    g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)
  new_c = (c * math_ops.sigmoid(f + self._forget_bias)
           + math_ops.sigmoid(i) * g)
  if self._layer_norm:
    new_c = self._norm(new_c, "state")
  new_h = self._activation(new_c) * math_ops.sigmoid(o)
  new_state = rnn_cell_impl.LSTMStateTuple(new_c, new_h)
  return new_h, new_state
Example 3: set_value
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def set_value(x, value):
  """Sets the value of a variable, from a Numpy array.

  Arguments:
      x: Tensor to set to a new value.
      value: Value to set the tensor to, as a Numpy array
          (of the same shape).
  """
  value = np.asarray(value)
  tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
  if hasattr(x, '_assign_placeholder'):
    assign_placeholder = x._assign_placeholder
    assign_op = x._assign_op
  else:
    assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
    assign_op = x.assign(assign_placeholder)
    x._assign_placeholder = assign_placeholder
    x._assign_op = assign_op
  get_session().run(assign_op, feed_dict={assign_placeholder: value})
Example 4: batch_set_value
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def batch_set_value(tuples):
  """Sets the values of many tensor variables at once.

  Arguments:
      tuples: a list of tuples `(tensor, value)`.
          `value` should be a Numpy array.
  """
  if tuples:
    assign_ops = []
    feed_dict = {}
    for x, value in tuples:
      value = np.asarray(value)
      tf_dtype = _convert_string_dtype(x.dtype.name.split('_')[0])
      if hasattr(x, '_assign_placeholder'):
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
      else:
        assign_placeholder = array_ops.placeholder(tf_dtype, shape=value.shape)
        assign_op = x.assign(assign_placeholder)
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
      assign_ops.append(assign_op)
      feed_dict[assign_placeholder] = value
    get_session().run(assign_ops, feed_dict=feed_dict)
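Both helpers cache the placeholder and assign op on the variable itself, so repeated calls reuse a single graph node instead of growing the graph with a new assign op per call. A hypothetical usage sketch (assuming a Keras-style backend where get_session() returns the active tf.Session):

weights = [np.ones((2, 2), np.float32), np.zeros((2,), np.float32)]
variables = [tf.Variable(np.zeros_like(w)) for w in weights]
batch_set_value(list(zip(variables, weights)))  # one session.run for all assignments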
Example 5: _weighted_gini
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def _weighted_gini(self, class_counts):
  """Our split score is the Gini impurity times the number of examples.

  If c(i) denotes the i-th class count and c = sum_i c(i) then
    score = c * (1 - sum_i ( c(i) / c )^2 )
          = c - sum_i c(i)^2 / c

  Args:
    class_counts: A 2-D tensor of per-class counts, usually a slice or
      gather from variables.node_sums.

  Returns:
    A 1-D tensor of the Gini impurities for each row in the input.
  """
  smoothed = 1.0 + array_ops.slice(class_counts, [0, 1], [-1, -1])
  sums = math_ops.reduce_sum(smoothed, 1)
  sum_squares = math_ops.reduce_sum(math_ops.square(smoothed), 1)
  return sums - sum_squares / sums
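A quick NumPy check of the identity stated in the docstring, with hypothetical counts:

import numpy as np

counts = np.array([3.0, 5.0, 2.0])
c = counts.sum()
lhs = c * (1.0 - np.sum((counts / c) ** 2))  # c * (1 - sum_i (c(i)/c)^2)
rhs = c - np.sum(counts ** 2) / c            # c - sum_i c(i)^2 / c
assert np.isclose(lhs, rhs)                  # both equal 6.2 here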
Example 6: __call__
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def __call__(self, inputs, state, scope=None):
  """Gated recurrent unit (GRU) with nunits cells."""
  with vs.variable_scope(scope or "gru_cell"):
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      r, u = array_ops.split(
          value=_linear(
              [inputs, state], 2 * self._num_units, True, 1.0, scope=scope),
          num_or_size_splits=2,
          axis=1)
      r, u = sigmoid(r), sigmoid(u)
    with vs.variable_scope("candidate"):
      c = self._activation(_linear([inputs, r * state],
                                   self._num_units, True,
                                   scope=scope))
    new_h = u * state + (1 - u) * c
  return new_h, new_h
Example 7: __call__
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def __call__(self, inputs, state, scope=None):
  """Attention GRU with nunits cells."""
  with vs.variable_scope(scope or "attention_gru_cell"):
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      if inputs.get_shape()[-1] != self._num_units + 1:
        raise ValueError("Input should be passed as word input "
                         "concatenated with 1D attention on end axis")
      # Extract the input vector and the attention weight.
      inputs, g = array_ops.split(inputs,
                                  num_or_size_splits=[self._num_units, 1],
                                  axis=1)
      r = _linear([inputs, state], self._num_units, True)
      r = sigmoid(r)
    with vs.variable_scope("candidate"):
      r = r * _linear(state, self._num_units, False)
    with vs.variable_scope("input"):
      x = _linear(inputs, self._num_units, True)
    h_hat = self._activation(r + x)
    new_h = (1 - g) * state + g * h_hat
  return new_h, new_h
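Here split uses explicit sizes: each input row is a word vector of length num_units with a single attention scalar appended. A NumPy shape sketch (num_units=128 is hypothetical):

import numpy as np

inputs = np.zeros((32, 128 + 1))
x, g = np.split(inputs, [128], axis=1)  # x: (32, 128), g: (32, 1)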
Example 8: __call__
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def __call__(self, inputs, state, scope=None):
  """Run one step of SRU."""
  with tf.variable_scope(scope or type(self).__name__):  # "SRUCell"
    with tf.variable_scope("x_hat"):
      x = linear([inputs], self._num_units, False)
    with tf.variable_scope("gates"):
      concat = tf.sigmoid(linear([inputs], 2 * self._num_units, True))
      f, r = tf.split(concat, 2, axis=1)
    with tf.variable_scope("candidates"):
      c = self._activation(f * state + (1 - f) * x)
      # Variational dropout as suggested in the paper (disabled):
      # if self._is_training and Params.dropout is not None:
      #   c = tf.nn.dropout(c, keep_prob=1 - Params.dropout)
    # Highway connection. Our implementation differs slightly from the
    # paper (https://arxiv.org/abs/1709.02755) in that the highway
    # network uses x_hat instead of the cell inputs. See equation (7)
    # of the original SRU paper.
    h = r * c + (1 - r) * x
  return h, c
Example 9: __call__
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def __call__(self, inputs, state, k_size=3, scope=None):
  """Convolutional Long short-term memory cell (ConvLSTM)."""
  with vs.variable_scope(scope or type(self).__name__):  # "ConvLSTMCell"
    if self._state_is_tuple:
      c, h = state
    else:
      # Legacy (pre-1.0) split signature: split(split_dim, num_split, value).
      c, h = array_ops.split(3, 2, state)
    # batch_size * height * width * channel
    concat = _conv([inputs, h], 4 * self._num_units, k_size, True)
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = array_ops.split(3, 4, concat)
    new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
             self._activation(j))
    new_h = self._activation(new_c) * sigmoid(o)
    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = array_ops.concat(3, [new_c, new_h])
    return new_h, new_state
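Note that this example (like Examples 12 and 15 below) is written against the legacy pre-1.0 TensorFlow API, where split and concat took the axis first. Under TF >= 1.0 the equivalent calls would be, as a sketch:

c, h = array_ops.split(state, 2, axis=3)
i, j, f, o = array_ops.split(concat, 4, axis=3)
new_state = array_ops.concat([new_c, new_h], axis=3)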
Example 10: call
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def call(self, inputs, state):
  """Forward propagation of the cell.

  Args:
    inputs: a 2D `Tensor` of [batch_size x input_size] shape
    state: a `tuple` of 2D `Tensor` of [batch_size x num_units] shape
      or a `Tensor` of [batch_size x (num_units * (self.depth + 1))] shape

  Returns:
    outputs: a 2D `Tensor` of [batch_size x num_units] shape
    next_state: a `tuple` of 2D `Tensor` of [batch_size x num_units] shape
      or a `Tensor` of [batch_size x (num_units * (self.depth + 1))] shape
  """
  if not self._state_is_tuple:
    states = array_ops.split(state, self.depth + 1, axis=1)
  else:
    states = state
  hidden_state = states[0]
  cell_states = states[1:]
  outputs, next_state = self._recurrence(inputs, hidden_state, cell_states, 0)
  if self._state_is_tuple:
    next_state = tuple(next_state)
  else:
    next_state = array_ops.concat(next_state, axis=1)
  return outputs, next_state
Example 11: call
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def call(self, inputs, state, scope=None):
  cell, hidden = state
  new_hidden = _conv([inputs, hidden],
                     self._kernel_shape,
                     4 * self._output_channels,
                     self._use_bias)
  gates = array_ops.split(value=new_hidden,
                          num_or_size_splits=4,
                          axis=self._conv_ndims + 1)
  input_gate, new_input, forget_gate, output_gate = gates
  new_cell = math_ops.sigmoid(forget_gate + self._forget_bias) * cell
  new_cell += math_ops.sigmoid(input_gate) * math_ops.tanh(new_input)
  output = math_ops.tanh(new_cell) * self._activation(output_gate)
  if self._skip_connection:
    output = array_ops.concat([output, inputs], axis=-1)
  new_state = rnn_cell_impl.LSTMStateTuple(new_cell, output)
  return output, new_state
Example 12: __call__
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def __call__(self, inputs, state, scope=None):
  """Long short-term memory cell (LSTM)."""
  with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
    # Parameters of gates are concatenated into one multiply for efficiency.
    if self._state_is_tuple:
      c, h = state
    else:
      # Legacy (pre-1.0) split signature: split(split_dim, num_split, value).
      c, h = array_ops.split(1, 2, state)
    concat = _linear([inputs, h], 4 * self._num_units, True)
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = array_ops.split(1, 4, concat)
    new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
             self._activation(j))
    new_h = self._activation(new_c) * sigmoid(o)
    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = array_ops.concat(1, [new_c, new_h])
    return new_h, new_state
Example 13: testForwardBackward
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def testForwardBackward(self):

  def f(x):
    return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)

  def g(x):
    return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)

  x = random_ops.random_uniform(
      [self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
  x1, x2 = array_ops.split(x, 2, axis=-1)
  block = rev_block_lib.RevBlock(f, g, num_layers=3)
  y1, y2 = block.forward(x1, x2)
  x1_inv, x2_inv = block.backward(y1, y2)

  with self.cached_session() as sess:
    sess.run(variables.global_variables_initializer())
    x1, x2, x1_inv, x2_inv = sess.run([x1, x2, x1_inv, x2_inv])
    self.assertAllClose(x1, x1_inv, atol=1e-5)
    self.assertAllClose(x2, x2_inv, atol=1e-5)
Example 14: testBackwardForward
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def testBackwardForward(self):

  def f(x):
    return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)

  def g(x):
    return core_layers.dense(x, self.CHANNELS // 2, use_bias=True)

  y = random_ops.random_uniform(
      [self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
  y1, y2 = array_ops.split(y, 2, axis=-1)
  block = rev_block_lib.RevBlock(f, g, num_layers=3)
  x1, x2 = block.backward(y1, y2)
  y1_inv, y2_inv = block.forward(x1, x2)

  with self.cached_session() as sess:
    sess.run(variables.global_variables_initializer())
    y1, y2, y1_inv, y2_inv = sess.run([y1, y2, y1_inv, y2_inv])
    self.assertAllClose(y1, y1_inv, rtol=1e-5)
    self.assertAllClose(y2, y2_inv, rtol=1e-5)
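For context, the invertibility these two tests exercise follows the standard reversible-block coupling y1 = x1 + f(x2), y2 = x2 + g(y1), which can be undone exactly without storing the inputs. A NumPy sketch with stand-in functions (the internals of rev_block_lib may differ):

import numpy as np

def f(x):
  return 0.5 * x      # stand-in for the dense layer above

def g(x):
  return np.tanh(x)   # stand-in for the dense layer above

x1, x2 = np.random.rand(2, 8), np.random.rand(2, 8)
y1 = x1 + f(x2)              # forward coupling
y2 = x2 + g(y1)
x2_rec = y2 - g(y1)          # inverse recovers the inputs exactly
x1_rec = y1 - f(x2_rec)
assert np.allclose(x1, x1_rec) and np.allclose(x2, x2_rec)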
Example 15: __call__
# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import split [as alias]
def __call__(self, inputs, state, scope=None):
  """Convolutional Long short-term memory cell (ConvLSTM)."""
  with vs.variable_scope(scope or type(self).__name__):  # "ConvLSTMCell"
    if self._state_is_tuple:
      c, h = state
    else:
      # Legacy (pre-1.0) split signature: split(split_dim, num_split, value).
      c, h = array_ops.split(3, 2, state)
    # batch_size * height * width * channel
    concat = _conv([inputs, h], 4 * self._num_units, self._k_size, True,
                   initializer=self._initializer)
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = array_ops.split(3, 4, concat)
    new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
             self._activation(j))
    new_h = self._activation(new_c) * sigmoid(o)
    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = array_ops.concat(3, [new_c, new_h])
    return new_h, new_state