本文整理匯總了Python中sugartensor.sg_intx方法的典型用法代碼示例。如果您正苦於以下問題:Python sugartensor.sg_intx方法的具體用法?Python sugartensor.sg_intx怎麽用?Python sugartensor.sg_intx使用的例子?那麽，這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類sugartensor的用法示例。
在下文中一共展示了sugartensor.sg_intx方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: sg_ctc
# 需要導入模塊: import sugartensor [as 別名]
# 或者: from sugartensor import sg_intx [as 別名]
def sg_ctc(tensor, opt):
    r"""Returns the CTC (Connectionist Temporal Classification) loss of
    `tensor` against the dense label tensor `opt.target`.

    Args:
      tensor: A 3-D `float Tensor` of logits (batch-major, i.e.
        time_major=False).
      opt:
        target: A dense label `Tensor` whose first dimension matches
          `tensor`'s.
        name: A `string`. A name to display in the tensor board web UI.

    Returns:
      A 1-D `Tensor` of per-example losses, with the same length as the
      first dimension of `tensor`.

    For example,

    ```
    tensor = [[[2., -1., 3.], [3., 1., -2.]], [[1., -1., 2.], [3., 1., -2.]]]
    target = [[2., 1.], [2., 3.]]
    tensor.sg_ctc(target=target) => [ 4.45940781  2.43091154]
    ```
    """
    assert opt.target is not None, 'target is mandatory.'

    # Unless the caller supplied seq_len, assume every example spans the
    # full time dimension (shape[1]); merging of repeated labels defaults on.
    dims = tf.shape(tensor)
    full_len = tf.ones((dims[0],), dtype=tf.sg_intx) * dims[1]
    opt += tf.sg_opt(seq_len=full_len, merge=True)

    # ctc_loss expects sparse labels; sg_to_sparse() converts the dense target.
    loss = tf.nn.ctc_loss(opt.target.sg_to_sparse(), tensor, opt.seq_len,
                          ctc_merge_repeated=opt.merge, time_major=False)
    loss = tf.identity(loss, 'ctc')

    # add summary for the tensor board
    tf.sg_summary_loss(loss, name=opt.name)
    return loss
示例2: sg_int
# 需要導入模塊: import sugartensor [as 別名]
# 或者: from sugartensor import sg_intx [as 別名]
def sg_int(tensor, opt):
    r"""Casts a tensor to the library-wide integer dtype `tf.sg_intx`.

    See `tf.cast()` in tensorflow.

    Args:
      tensor: A `Tensor` or `SparseTensor` (automatically given by chain).
      opt:
        name: If provided, it replaces current tensor's name.

    Returns:
      A `Tensor` or `SparseTensor` with same shape as `tensor`.
    """
    casted = tf.cast(tensor, tf.sg_intx, name=opt.name)
    return casted
示例3: __init__
# 需要導入模塊: import sugartensor [as 別名]
# 或者: from sugartensor import sg_intx [as 別名]
def __init__(self, batch_size=16, set_name='train'):
    r"""Builds a queued, dynamically padded (label, mfcc) batch pipeline.

    Args:
      batch_size: An `int`. Number of examples per batch.
      set_name: A `string`. Which split's meta csv to load from
        preprocess/meta/ (e.g. 'train').

    Sets attributes:
      label: dense integer label batch tensor.
      mfcc: mfcc feature batch tensor, transposed to batch x time x dim.
      num_batch: total number of full batches in the set.
    """
    # load meta file: column 0 is the mfcc file id, remaining columns are labels
    label, mfcc_file = [], []
    with open(_data_path + 'preprocess/meta/%s.csv' % set_name) as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        for row in reader:
            # mfcc file
            mfcc_file.append(_data_path + 'preprocess/mfcc/' + row[0] + '.npy')
            # label info ( serialize to a bytes object for variable-length support )
            # FIX: np.int and ndarray.tostring() were removed in NumPy
            # 1.24 / 2.0; dtype=int and tobytes() produce identical bytes.
            label.append(np.asarray(row[1:], dtype=int).tobytes())

    # to constant tensor
    label_t = tf.convert_to_tensor(label)
    mfcc_file_t = tf.convert_to_tensor(mfcc_file)

    # create queue from constant tensor
    label_q, mfcc_file_q \
        = tf.train.slice_input_producer([label_t, mfcc_file_t], shuffle=True)

    # create label, mfcc queue ( _load_mfcc decodes the .npy and label bytes )
    label_q, mfcc_q = _load_mfcc(source=[label_q, mfcc_file_q],
                                 dtypes=[tf.sg_intx, tf.sg_floatx],
                                 capacity=256, num_threads=64)

    # create batch queue with dynamic pad
    # mfcc shape is (20, None): 20 coefficients x variable time steps
    batch_queue = tf.train.batch([label_q, mfcc_q], batch_size,
                                 shapes=[(None,), (20, None)],
                                 num_threads=64, capacity=batch_size * 32,
                                 dynamic_pad=True)

    # split data
    self.label, self.mfcc = batch_queue
    # batch * time * dim ( mfcc arrives batch * dim * time )
    self.mfcc = self.mfcc.sg_transpose(perm=[0, 2, 1])
    # calc total batch count ( trailing partial batch is dropped )
    self.num_batch = len(label) // batch_size

    # print info
    tf.sg_info('%s set loaded.(total data=%d, total batch=%d)'
               % (set_name.upper(), len(label), self.num_batch))