本文整理汇总了Python中sugartensor.sg_intx方法的典型用法代码示例。如果您正苦于以下问题:Python sugartensor.sg_intx方法的具体用法?Python sugartensor.sg_intx怎么用?Python sugartensor.sg_intx使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sugartensor
的用法示例。
在下文中一共展示了sugartensor.sg_intx方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: sg_ctc
# 需要导入模块: import sugartensor [as 别名]
# 或者: from sugartensor import sg_intx [as 别名]
def sg_ctc(tensor, opt):
    r"""Returns the CTC (Connectionist Temporal Classification) loss of `tensor`
    against `opt.target`.

    Args:
      tensor: A 3-D `float Tensor` of logits (batch x time x classes,
        i.e. `time_major=False`).
      opt:
        target: A dense label `Tensor` with the same first-dimension length
          as `tensor`. Mandatory.
        name: A `string`. A name to display in the tensor board web UI.

    Returns:
      A 1-D `Tensor` of per-example CTC losses, one entry per batch item.

    For example,

    ```
    tensor = [[[2., -1., 3.], [3., 1., -2.]], [[1., -1., 2.], [3., 1., -2.]]]
    target = [[2., 1.], [2., 3.]]
    tensor.sg_ctc(target=target) => [ 4.45940781 2.43091154]
    ```
    """
    assert opt.target is not None, 'target is mandatory.'

    # When no sequence length is supplied, assume every batch item uses the
    # full time dimension.
    dims = tf.shape(tensor)
    opt += tf.sg_opt(seq_len=tf.ones((dims[0],), dtype=tf.sg_intx) * dims[1],
                     merge=True)

    # per-example CTC loss on the dense target converted to a sparse tensor
    loss = tf.nn.ctc_loss(opt.target.sg_to_sparse(), tensor, opt.seq_len,
                          ctc_merge_repeated=opt.merge, time_major=False)
    loss = tf.identity(loss, 'ctc')

    # expose the loss in tensorboard
    tf.sg_summary_loss(loss, name=opt.name)

    return loss
示例2: sg_int
# 需要导入模块: import sugartensor [as 别名]
# 或者: from sugartensor import sg_intx [as 别名]
def sg_int(tensor, opt):
    r"""Casts `tensor` to sugartensor's default integer dtype (`tf.sg_intx`).

    See `tf.cast()` in tensorflow.

    Args:
      tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
      opt:
        name: If provided, it replaces current tensor's name.

    Returns:
      A `Tensor` or `SparseTensor` with same shape as `tensor`.
    """
    casted = tf.cast(tensor, tf.sg_intx, name=opt.name)
    return casted
示例3: __init__
# 需要导入模块: import sugartensor [as 别名]
# 或者: from sugartensor import sg_intx [as 别名]
def __init__(self, batch_size=16, set_name='train'):
    """Builds input queues for MFCC features and their labels.

    Reads the preprocessed meta CSV for `set_name`, then wires up TF queue
    runners that yield dynamically padded (label, mfcc) batches.

    Args:
      batch_size: An `int`. Number of examples per batch.
      set_name: A `string`. Data split to load ('train', 'valid', ...).

    Side effects (attributes set):
      self.label:     batch of label sequences, padded to max length.
      self.mfcc:      batch of MFCC features, batch x time x dim.
      self.num_batch: total number of full batches in the split.
    """
    # load meta file
    label, mfcc_file = [], []
    with open(_data_path + 'preprocess/meta/%s.csv' % set_name) as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        for row in reader:
            # mfcc file
            mfcc_file.append(_data_path + 'preprocess/mfcc/' + row[0] + '.npy')
            # label info ( convert to string object for variable-length support )
            # NOTE: `np.int` was removed in NumPy 1.24 and `tostring()` in
            # NumPy 2.0; `int` and `tobytes()` are the exact modern
            # equivalents (same dtype, same bytes).
            label.append(np.asarray(row[1:], dtype=int).tobytes())

    # to constant tensor
    label_t = tf.convert_to_tensor(label)
    mfcc_file_t = tf.convert_to_tensor(mfcc_file)

    # create queue from constant tensor
    label_q, mfcc_file_q \
        = tf.train.slice_input_producer([label_t, mfcc_file_t], shuffle=True)

    # create label, mfcc queue
    label_q, mfcc_q = _load_mfcc(source=[label_q, mfcc_file_q],
                                 dtypes=[tf.sg_intx, tf.sg_floatx],
                                 capacity=256, num_threads=64)

    # create batch queue with dynamic pad
    batch_queue = tf.train.batch([label_q, mfcc_q], batch_size,
                                 shapes=[(None,), (20, None)],
                                 num_threads=64, capacity=batch_size*32,
                                 dynamic_pad=True)

    # split data
    self.label, self.mfcc = batch_queue
    # batch * time * dim
    self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1])
    # calc total batch count (partial trailing batch is dropped)
    self.num_batch = len(label) // batch_size

    # print info
    tf.sg_info('%s set loaded.(total data=%d, total batch=%d)'
               % (set_name.upper(), len(label), self.num_batch))