This article collects typical usage examples of the Python method shapes.tensor_dim. If you are wondering what shapes.tensor_dim does and how to use it, the curated examples below may help. You can also browse further usage examples from the shapes module.
The following shows 5 code examples of shapes.tensor_dim, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
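Before the examples, a note on what tensor_dim returns. The sketch below is a minimal reconstruction, assuming shapes.tensor_dim follows the common TF1 pattern of preferring a statically known dimension and falling back to the dynamic shape; treat it as an assumption about the module, not its actual source.

import tensorflow as tf

def tensor_dim(tensor, dim):
  """Returns dimension dim of tensor: a Python int if statically known,
  otherwise a scalar tensor computed at run time (assumed behavior)."""
  result = tensor.get_shape().as_list()[dim]
  if result is None:
    result = tf.shape(tensor)[dim]
  return result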
Example 1: _AddOutputs
# Required module import: import shapes [as alias]
# Or: from shapes import tensor_dim [as alias]
# This class-method snippet also assumes: import logging; import tensorflow as tf
def _AddOutputs(self, prev_layer, out_dims, out_func, num_classes):
"""Adds the output layer and loss function.
Args:
prev_layer: Output of last layer of main network.
out_dims: Number of output dimensions, 0, 1 or 2.
out_func: Output non-linearity. 's' or 'c'=softmax, 'l'=logistic.
num_classes: Number of outputs/size of last output dimension.
"""
height_in = shapes.tensor_dim(prev_layer, dim=1)
logits, outputs = self._AddOutputLayer(prev_layer, out_dims, out_func,
num_classes)
if self.mode == 'train':
# Setup loss for training.
self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func)
tf.summary.scalar('loss', self.loss)
elif out_dims == 0:
# Be sure the labels match the output, even in eval mode.
self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
self.labels = tf.reshape(self.labels, [-1])
logging.info('Final output=%s', outputs)
logging.info('Labels tensor=%s', self.labels)
self.output = outputs
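In the out_dims == 0 branch above, the labels are reduced to a single label per batch element before evaluation. A toy illustration of the slice-and-reshape, with made-up values:

import tensorflow as tf

labels = tf.constant([[3, 7, 9], [1, 4, 6]])  # [batch, seq_len]
labels = tf.slice(labels, [0, 0], [-1, 1])    # keep first column: [[3], [1]]
labels = tf.reshape(labels, [-1])             # flatten to [3, 1]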
Example 2: _AddOutputs
# Required module import: import shapes [as alias]
# Or: from shapes import tensor_dim [as alias]
# This class-method snippet also assumes: import logging; import tensorflow as tf
def _AddOutputs(self, prev_layer, out_dims, out_func, num_classes):
"""Adds the output layer and loss function.
Args:
prev_layer: Output of last layer of main network.
out_dims: Number of output dimensions, 0, 1 or 2.
out_func: Output non-linearity. 's' or 'c'=softmax, 'l'=logistic.
num_classes: Number of outputs/size of last output dimension.
"""
height_in = shapes.tensor_dim(prev_layer, dim=1)
logits, outputs = self._AddOutputLayer(prev_layer, out_dims, out_func,
num_classes)
if self.mode == 'train':
# Setup loss for training.
self.loss = self._AddLossFunction(logits, height_in, out_dims, out_func)
tf.scalar_summary('loss', self.loss, name='loss')
elif out_dims == 0:
# Be sure the labels match the output, even in eval mode.
self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
self.labels = tf.reshape(self.labels, [-1])
logging.info('Final output=%s', outputs)
logging.info('Labels tensor=%s', self.labels)
self.output = outputs
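This example is almost identical to Example 1; the one difference is the summary call. tf.scalar_summary is the pre-1.0 TensorFlow API; it was replaced by tf.summary.scalar in TensorFlow 1.0, so the TF 1.x equivalent of the line above is the one Example 1 already uses:

tf.summary.scalar('loss', self.loss)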
Example 3: AddFCLayer
# Required module import: import shapes [as alias]
# Or: from shapes import tensor_dim [as alias]
# This class-method snippet also assumes: import re; import tensorflow as tf;
# and a slim alias such as slim = tf.contrib.slim
def AddFCLayer(self, prev_layer, index):
"""Parse expression and add Fully Connected Layer.
Args:
prev_layer: Input tensor.
index: Position in model_str to start parsing.
Returns:
Output tensor, end index in model_str.
"""
pattern = re.compile(R'(F)(s|t|r|l|m)({\w+})?(\d+)')
m = pattern.match(self.model_str, index)
if m is None:
return None, None
fn = self._NonLinearity(m.group(2))
name = self._GetLayerName(m.group(0), index, m.group(3))
depth = int(m.group(4))
input_depth = shapes.tensor_dim(prev_layer, 1) * shapes.tensor_dim(
prev_layer, 2) * shapes.tensor_dim(prev_layer, 3)
# The slim fully connected is actually a 1x1 conv, so we have to crush the
# dimensions on input.
# Everything except batch goes to depth, and therefore has to be known.
shaped = tf.reshape(
prev_layer, [-1, input_depth], name=name + '_reshape_in')
output = slim.fully_connected(shaped, depth, activation_fn=fn, scope=name)
# Width and height are collapsed to 1.
self.reduction_factors[1] = None
self.reduction_factors[2] = None
return tf.reshape(
output, [shapes.tensor_dim(prev_layer, 0), 1, 1, depth],
name=name + '_reshape_out'), m.end()
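To see what the pattern accepts, here is a small self-contained sketch; the spec strings 'Fr64' and 'Fs{logits}10' are made-up inputs for illustration:

import re

# Same pattern as in AddFCLayer: 'F', a non-linearity code, an optional
# {name}, and the output depth.
pattern = re.compile(R'(F)(s|t|r|l|m)({\w+})?(\d+)')
for spec in ['Fr64', 'Fs{logits}10']:
  m = pattern.match(spec)
  print(spec, '-> non-linearity:', m.group(2),
        'name:', m.group(3), 'depth:', int(m.group(4)))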
Example 4: _AddLossFunction
# Required module import: import shapes [as alias]
# Or: from shapes import tensor_dim [as alias]
# This class-method snippet also assumes: import tensorflow as tf; the helpers
# _PadLabels2d and _PadLabels3d are defined elsewhere in the same module
def _AddLossFunction(self, logits, height_in, out_dims, out_func):
"""Add the appropriate loss function.
Args:
logits: Pre-softmax/logistic fully-connected output shaped to out_dims.
height_in: Height of logits before going into the softmax layer.
out_dims: Number of output dimensions, 0, 1 or 2.
out_func: Output non-linearity. 's' or 'c'=softmax, 'l'=logistic.
Returns:
loss: That which is to be minimized.
Raises:
ValueError: if logistic is used.
"""
if out_func == 'c':
# Transpose batch to the middle.
ctc_input = tf.transpose(logits, [1, 0, 2])
# Compute the widths of each batch element from the input widths.
widths = self.layers.GetLengths(dim=2, factor=height_in)
cross_entropy = tf.nn.ctc_loss(ctc_input, self.sparse_labels, widths)
elif out_func == 's':
if out_dims == 2:
self.labels = _PadLabels3d(logits, self.labels)
elif out_dims == 1:
self.labels = _PadLabels2d(
shapes.tensor_dim(
logits, dim=1), self.labels)
else:
self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
self.labels = tf.reshape(self.labels, [-1])
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=self.labels, name='xent')
else:
# TODO(rays) Labels need an extra dimension for logistic, so different
# padding functions are needed, as well as a different loss function.
raise ValueError('Logistic not yet supported!')
return tf.reduce_sum(cross_entropy)
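The transpose in the CTC branch converts batch-major logits into the time-major layout that this version of tf.nn.ctc_loss expects by default. A minimal NumPy illustration of the shape change, with toy sizes:

import numpy as np

logits = np.zeros((2, 5, 4))                 # [batch, width, num_classes]
ctc_input = np.transpose(logits, [1, 0, 2])  # [width, batch, num_classes]
print(ctc_input.shape)                       # (5, 2, 4)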
Example 5: _AddLossFunction
# Required module import: import shapes [as alias]
# Or: from shapes import tensor_dim [as alias]
# This class-method snippet also assumes: import tensorflow as tf; the helpers
# _PadLabels2d and _PadLabels3d are defined elsewhere in the same module
def _AddLossFunction(self, logits, height_in, out_dims, out_func):
"""Add the appropriate loss function.
Args:
logits: Pre-softmax/logistic fully-connected output shaped to out_dims.
height_in: Height of logits before going into the softmax layer.
out_dims: Number of output dimensions, 0, 1 or 2.
out_func: Output non-linearity. 's' or 'c'=softmax, 'l'=logistic.
Returns:
loss: That which is to be minimized.
Raises:
ValueError: if logistic is used.
"""
if out_func == 'c':
# Transpose batch to the middle.
ctc_input = tf.transpose(logits, [1, 0, 2])
# Compute the widths of each batch element from the input widths.
widths = self.layers.GetLengths(dim=2, factor=height_in)
cross_entropy = tf.nn.ctc_loss(ctc_input, self.sparse_labels, widths)
elif out_func == 's':
if out_dims == 2:
self.labels = _PadLabels3d(logits, self.labels)
elif out_dims == 1:
self.labels = _PadLabels2d(
shapes.tensor_dim(
logits, dim=1), self.labels)
else:
self.labels = tf.slice(self.labels, [0, 0], [-1, 1])
self.labels = tf.reshape(self.labels, [-1])
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, self.labels, name='xent')
else:
# TODO(rays) Labels need an extra dimension for logistic, so different
# padding functions are needed, as well as a different loss function.
raise ValueError('Logistic not yet supported!')
return tf.reduce_sum(cross_entropy)
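The only difference from Example 4 is that the cross-entropy call here passes logits and labels positionally, which only the pre-1.0 TensorFlow API accepted; TensorFlow 1.x requires the keyword form that Example 4 uses. A toy sketch of the keyword form, with made-up values:

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1], [0.2, 1.5, 0.3]])  # [batch, classes]
labels = tf.constant([0, 1])                              # one label per batch element
xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels, name='xent')
loss = tf.reduce_sum(xent)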