This article collects typical usage examples of the Python method tensorflow.Dimension. If you have been wondering how exactly tensorflow.Dimension is used and what real code that calls it looks like, the curated code examples below should help. You can also explore further usage examples from the tensorflow module itself.
The following presents 15 code examples of the tensorflow.Dimension method, sorted by popularity by default.
Example 1: build
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
def build(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    input_dim = input_shape[-1]
    if isinstance(input_dim, tf.Dimension):
        input_dim = input_dim.value
    self.local_scale = self.add_weight(
        shape=(input_dim,),
        name='local_scale',
        initializer=self.local_scale_initializer,
        regularizer=self.local_scale_regularizer,
        constraint=self.local_scale_constraint)
    self.global_scale = self.add_weight(
        shape=(),
        name='global_scale',
        initializer=self.global_scale_initializer,
        regularizer=self.global_scale_regularizer,
        constraint=self.global_scale_constraint)
    super(DenseHierarchical, self).build(input_shape)
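Note: the isinstance(input_dim, tf.Dimension) check recurs throughout these examples. It is a compatibility idiom: in TF 1.x, indexing a tf.TensorShape returns a tf.Dimension, while with v2-style shape behavior it returns a plain int (or None). A minimal sketch of the idiom in isolation (assuming TF 1.x):

dim = tf.TensorShape([None, 64])[-1]
if isinstance(dim, tf.Dimension):
    dim = dim.value  # unwrap to a plain int, or None if unknown
# dim is now 64 for the shape above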
Example 2: build
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
def build(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    last_dim = input_shape[-1]
    if isinstance(last_dim, tf.Dimension):
        last_dim = last_dim.value
    if last_dim is None:
        raise ValueError('The last dimension of the inputs to `ActNorm` '
                         'should be defined. Found `None`.')
    bias = self.add_weight('bias', [last_dim], dtype=self.dtype)
    log_scale = self.add_weight('log_scale', [last_dim], dtype=self.dtype)
    # Set data-dependent initializers.
    bias = bias.assign(self.bias_initial_value)
    with tf.control_dependencies([bias]):
        self.bias = bias
    log_scale = log_scale.assign(self.log_scale_initial_value)
    with tf.control_dependencies([log_scale]):
        self.log_scale = log_scale
    self.built = True
Example 3: build
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
def build(self, input_shape=None):
    input_shape = tf.TensorShape(input_shape)
    input_dim = input_shape[-1]
    if isinstance(input_dim, tf.Dimension):
        input_dim = input_dim.value
    self.conditional_inputs = self.add_weight(
        shape=(self.num_inducing, input_dim),
        name='inducing_inputs',
        initializer=self.inducing_inputs_initializer,
        regularizer=self.inducing_inputs_regularizer,
        constraint=self.inducing_inputs_constraint)
    self.conditional_outputs = self.add_weight(
        shape=(self.num_inducing, self.units),
        name='inducing_outputs',
        initializer=self.inducing_outputs_initializer,
        regularizer=self.inducing_outputs_regularizer,
        constraint=self.inducing_outputs_constraint)
    super(SparseGaussianProcess, self).build(input_shape)
Example 4: count_trainable_params
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
# Also required: from typing import Optional
def count_trainable_params(graph: Optional[tf.Graph] = None) -> int:
    """Count the number of trainable parameters in the graph.

    Parameters
    ----------
    graph
        tensorflow graph; defaults to the current default graph

    Returns
    -------
    number_of_parameters
        number of trainable parameters
    """
    graph = graph or tf.get_default_graph()
    total_parameters = 0
    for variable in graph.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
        # shape is a TensorShape made of tf.Dimension entries
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    return total_parameters
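A minimal usage sketch (assuming a TF 1.x graph; the variables here are made up for illustration):

graph = tf.Graph()
with graph.as_default():
    tf.get_variable('w', shape=[3, 4])  # 12 parameters
    tf.get_variable('b', shape=[4])     # 4 parameters
print(count_trainable_params(graph))    # 16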
Example 5: select_present
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
# Note: broadcast_against is a helper defined elsewhere in the same repository
def select_present(x, presence, batch_size=1, name='select_present'):
    with tf.variable_scope(name):
        presence = 1 - tf.to_int32(presence)  # invert mask

        bs = x.get_shape()[0]
        if bs != None:  # here type(bs) is tf.Dimension and == is ok
            batch_size = int(bs)

        num_partitions = 2 * batch_size
        r = tf.range(0, num_partitions, 2)
        r.set_shape(tf.TensorShape(batch_size))
        r = broadcast_against(r, presence)

        presence += r

        selected = tf.dynamic_partition(x, presence, num_partitions)
        selected = tf.concat(axis=0, values=selected)
        selected = tf.reshape(selected, tf.shape(x))

    return selected
Example 6: testWhileShapeInference
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
# Also required: from tensorflow.python.framework import tensor_shape
def testWhileShapeInference(self):
    with self.test_session():
        i = tf.constant(0)
        m = tf.ones([2, 2])
        c = lambda i, j: tf.less(i, 2)

        def b(i, j):
            new_i = tf.add(i, 1)
            new_j = tf.concat(0, [j, j])  # legacy pre-1.0 tf.concat(axis, values) argument order
            return [new_i, new_j]

        r = tf.while_loop(c, b, [i, m],
                          [i.get_shape(), tensor_shape.TensorShape([None, 2])])
        self.assertTrue(r[1].get_shape()[0].value is None)
        self.assertEqual(r[1].get_shape()[1], tf.Dimension(2))

        with self.assertRaisesRegexp(ValueError, "not an invariant for"):
            r = tf.while_loop(c, b, [i, m])
Example 7: get_variables_number
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
def get_variables_number(trainable_variables):
    """
    Calculate the number of trainable parameters in the current network.

    :param trainable_variables: trainable variables
    :return:
        total_parameters: the total number of trainable parameters
    """
    total_parameters = 0
    for variable in trainable_variables:
        # shape is a TensorShape made of tf.Dimension entries
        shapes = variable.get_shape()
        variable_parameters = 1
        for shape in shapes:
            variable_parameters *= shape.value
        total_parameters += variable_parameters
    return total_parameters
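Typical call site (a sketch; assumes a TF 1.x graph has already been built):

print('trainable parameters:', get_variables_number(tf.trainable_variables()))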
Example 8: repeat_2d
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
def repeat_2d(x, reps, axis):
    assert axis in (0, 1)
    if axis == 1:
        x = tf.transpose(x)

    static_shape = list(x.get_shape())
    dyn_shape = tf.shape(x)
    x_repeat = tf.reshape(tf.tile(x, [1, reps]),
                          (dyn_shape[0] * reps, dyn_shape[1]))
    if static_shape[0].value is not None:
        static_shape[0] = tf.Dimension(static_shape[0].value * reps)
    x_repeat.set_shape(static_shape)

    if axis == 1:
        x_repeat = tf.transpose(x_repeat)
    return x_repeat
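A quick sanity check of repeat_2d (a hedged sketch, TF 1.x session mode; values are illustrative):

x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])          # shape (2, 3)
y = repeat_2d(x, reps=2, axis=0)      # each row repeated in place: shape (4, 3)
with tf.Session() as sess:
    print(sess.run(y))  # [[1 2 3] [1 2 3] [4 5 6] [4 5 6]]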
Example 9: show_parameter_count
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
def show_parameter_count(variables):
    """Count and print how many parameters there are."""
    total_parameters = 0
    for variable in variables:
        name = variable.name
        # shape is a TensorShape made of tf.Dimension entries
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        print('{}: {} ({} parameters)'.format(name,
                                              shape,
                                              variable_parameters))
        total_parameters += variable_parameters
    print('Total: {} parameters'.format(total_parameters))
Example 10: clip_gradients
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
# Also required: import numpy as np
def clip_gradients(gvs, value_clip=0, norm_clip=0):
    """Clips gradients."""
    grads, vs = zip(*gvs)
    grads = list(grads)

    if value_clip > 0:
        for i, g in enumerate(grads):
            if g is not None:
                grads[i] = tf.clip_by_value(g, -value_clip, value_clip)

    if norm_clip > 0:
        n_params = sum(np.prod(g.shape) for g in grads if g is not None)
        # n_params is most likely a tf.Dimension and cannot be converted
        # to float directly
        norm_clip *= np.sqrt(float(int(n_params)))

        grads_to_clip = [(i, g) for i, g in enumerate(grads) if g is not None]
        idx, grads_to_clip = zip(*grads_to_clip)
        clipped_grads = tf.clip_by_global_norm(grads_to_clip, norm_clip)[0]

        for i, g in zip(idx, clipped_grads):
            grads[i] = g

    return list(zip(grads, vs))
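A hedged usage sketch in a typical TF 1.x training setup (loss is a placeholder name for your scalar loss tensor):

optimizer = tf.train.AdamOptimizer(1e-3)
gvs = optimizer.compute_gradients(loss)
gvs = clip_gradients(gvs, value_clip=5.0, norm_clip=1.0)
train_op = optimizer.apply_gradients(gvs)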
Example 11: generate_iterator_ops
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
# Note: FLAGS, _parse_function, annotation_func_train and annotation_func_test are defined elsewhere in the same module
def generate_iterator_ops(filenames, train=True, reuse=False):
    dataset = tf.data.TFRecordDataset(filenames)
    dataset = dataset.map(_parse_function)
    if train:
        dataset = dataset.shuffle(buffer_size=2 * FLAGS.batch_size)
    dataset = dataset.padded_batch(
        FLAGS.batch_size,
        ([tf.Dimension(None), tf.Dimension(1024), tf.Dimension(3)],
         [tf.Dimension(None)], [])
    )
    data_iterator = dataset.make_initializable_iterator()
    next_x, next_y, next_l = data_iterator.get_next()

    if train:
        ops = annotation_func_train(next_x, next_y, next_l, train=train, reuse=reuse)
    else:
        ops = annotation_func_test(next_x, next_l, reuse=reuse)
    ops = list(ops)
    ops.append(next_y)
    ops.append(next_l)
    return data_iterator, ops
Example 12: build
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
# Note: Dense and TimeDistributed are Keras layers; ScaledDotProductAttention is defined elsewhere in the same repository
def build(self, input_shape):
    self._validate_input_shape(input_shape)

    d_k = self._d_k if self._d_k else input_shape[1][-1]
    d_model = self._d_model if self._d_model else input_shape[1][-1]
    d_v = self._d_v

    if isinstance(d_k, tf.Dimension):
        d_k = d_k.value
    if isinstance(d_model, tf.Dimension):
        d_model = d_model.value

    self._q_layers = []
    self._k_layers = []
    self._v_layers = []
    self._sdp_layer = ScaledDotProductAttention(return_attention=self._return_attention)

    for _ in range(self._h):
        self._q_layers.append(
            TimeDistributed(
                Dense(d_k, activation=self._activation, use_bias=False)))
        self._k_layers.append(
            TimeDistributed(
                Dense(d_k, activation=self._activation, use_bias=False)))
        self._v_layers.append(
            TimeDistributed(
                Dense(d_v, activation=self._activation, use_bias=False)))

    self._output = TimeDistributed(Dense(d_model))
    # if self._return_attention:
    #     self._output = Concatenate()
Example 13: shape_to_list
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
# Also required: from typing import Iterable, List, Union
def shape_to_list(shape: Iterable[tf.Dimension]) -> List[Union[int, None]]:
    """Convert a TensorFlow shape to a list of ints (None for unknown dimensions)."""
    return [dim.value for dim in shape]
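For example (TF 1.x; None marks an unknown dimension):

x = tf.placeholder(tf.float32, shape=[None, 28, 28, 3])
print(shape_to_list(x.get_shape()))  # [None, 28, 28, 3]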
Example 14: _compute_fans
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
def _compute_fans(shape):
    """Computes the number of input and output units for a weight shape.

    Args:
        shape: Integer shape tuple or TF tensor shape.

    Returns:
        A tuple of scalars (fan_in, fan_out).
    """
    if len(shape) < 1:  # Just to avoid errors for constants.
        fan_in = fan_out = 1
    elif len(shape) == 1:
        fan_in = fan_out = shape[0]
    elif len(shape) == 2:
        fan_in = shape[0]
        fan_out = shape[1]
    else:
        # Assuming convolution kernels (2D, 3D, or more).
        # kernel shape: (..., input_depth, depth)
        receptive_field_size = 1.
        for dim in shape[:-2]:
            receptive_field_size *= dim
        fan_in = shape[-2] * receptive_field_size
        fan_out = shape[-1] * receptive_field_size
    if isinstance(fan_in, tf.Dimension):
        fan_in = fan_in.value
    if isinstance(fan_out, tf.Dimension):
        fan_out = fan_out.value
    return fan_in, fan_out
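For instance, a 3x3 convolution kernel with 16 input and 32 output channels has a receptive field of 9, so (a sketch with TF 1.x shapes):

fan_in, fan_out = _compute_fans(tf.TensorShape([3, 3, 16, 32]))
print(fan_in, fan_out)  # 144 288  (9 * 16 and 9 * 32)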
Example 15: compute_output_shape
# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dimension [as alias]
def compute_output_shape(self, input_shape):
    input_shape = tf.TensorShape(input_shape)
    input_shape = input_shape.with_rank_at_least(2)
    input_dim = input_shape[-1]
    if isinstance(input_dim, tf.Dimension):
        input_dim = input_dim.value
    if input_dim is None:
        raise ValueError(
            'The innermost dimension of input_shape must be defined, but saw: %s'
            % input_shape)
    return input_shape[:-1].concatenate(self.units)
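Sketch of the expected behavior (assuming an instance layer of this class with units=10):

out_shape = layer.compute_output_shape(tf.TensorShape([None, 64]))
print(out_shape)  # (?, 10)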