This article collects typical usage examples of the tensorflow.sparse_tensor_to_dense method in Python. If you are wondering how tensorflow.sparse_tensor_to_dense is used in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow module that this method belongs to.
The following presents 14 code examples of tensorflow.sparse_tensor_to_dense, sorted by popularity by default.
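Before the examples, here is a minimal sketch of what the method itself does, assuming the TensorFlow 1.x API where sparse_tensor_to_dense is available as a top-level symbol: it builds a dense tensor of shape dense_shape filled with default_value and writes the sparse values at their indices.

import tensorflow as tf

# A 2x3 SparseTensor with two non-zero entries.
sp = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[10, 20], dense_shape=[2, 3])
dense = tf.sparse_tensor_to_dense(sp, default_value=0)

with tf.Session() as sess:
    print(sess.run(dense))
    # [[10  0  0]
    #  [ 0  0 20]]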
Example 1: _reshape_instance_masks
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def _reshape_instance_masks(self, keys_to_tensors):
    """Reshape instance segmentation masks.

    The instance segmentation masks are reshaped to [num_instances, height,
    width] and cast to boolean type to save memory.

    Args:
        keys_to_tensors: a dictionary from keys to tensors.

    Returns:
        A 3-D boolean tensor of shape [num_instances, height, width].
    """
    masks = keys_to_tensors['image/segmentation/object']
    if isinstance(masks, tf.SparseTensor):
        masks = tf.sparse_tensor_to_dense(masks)
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
    return tf.cast(tf.reshape(masks, to_shape), tf.bool)
Example 2: _reshape_keypoints
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def _reshape_keypoints(self, keys_to_tensors):
    """Reshape keypoints.

    The keypoints are reshaped to [num_instances, num_keypoints, 2].

    Args:
        keys_to_tensors: a dictionary from keys to tensors.

    Returns:
        A 3-D float tensor of shape [num_instances, num_keypoints, 2] holding
        the (y, x) coordinates of each keypoint.
    """
    y = keys_to_tensors['image/object/keypoint/y']
    if isinstance(y, tf.SparseTensor):
        y = tf.sparse_tensor_to_dense(y)
    y = tf.expand_dims(y, 1)
    x = keys_to_tensors['image/object/keypoint/x']
    if isinstance(x, tf.SparseTensor):
        x = tf.sparse_tensor_to_dense(x)
    x = tf.expand_dims(x, 1)
    keypoints = tf.concat([y, x], 1)
    keypoints = tf.reshape(keypoints, [-1, self._num_keypoints, 2])
    return keypoints
Example 3: _reshape_instance_masks
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def _reshape_instance_masks(self, keys_to_tensors):
    """Reshape instance segmentation masks.

    The instance segmentation masks are reshaped to [num_instances, height,
    width].

    Args:
        keys_to_tensors: a dictionary from keys to tensors.

    Returns:
        A 3-D float tensor of shape [num_instances, height, width] with values
        in {0, 1}.
    """
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
    masks = keys_to_tensors['image/object/mask']
    if isinstance(masks, tf.SparseTensor):
        masks = tf.sparse_tensor_to_dense(masks)
    masks = tf.reshape(tf.to_float(tf.greater(masks, 0.0)), to_shape)
    return tf.cast(masks, tf.float32)
Example 4: multiple_content_lookup
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def multiple_content_lookup(content, vocab_table, ids, name=None):
    """Look up the token ids of several content entries at once.

    :param content: 1-D string tensor; each element is a space-separated token string
    :param vocab_table: lookup table mapping token strings to integer ids
    :param ids: 1-D int tensor of row indices into `content`
    :param name: optional name for the name scope
    :return: 2-D [batch_size, max_length_in_batch] content id matrix,
             1-D [batch_size] content length vector
    """
    with tf.name_scope(name, 'multiple_content_lookup', [content, vocab_table, ids]):
        content_list = tf.nn.embedding_lookup(content, ids)
        extracted_sparse_content = tf.string_split(content_list, delimiter=' ')
        sparse_content = tf.SparseTensor(indices=extracted_sparse_content.indices,
                                         values=vocab_table.lookup(extracted_sparse_content.values),
                                         dense_shape=extracted_sparse_content.dense_shape)
        extracted_content_ids = tf.sparse_tensor_to_dense(sparse_content,
                                                          default_value=0, name='dense_content')
        extracted_content_len = tf.reduce_sum(
            tf.cast(tf.not_equal(extracted_content_ids, 0), tf.int32), axis=-1)
        return extracted_content_ids, extracted_content_len
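A hypothetical usage sketch for the function above; the vocabulary, example strings, and table construction are illustrative assumptions, using the TensorFlow 1.x tf.contrib.lookup string-to-id table. Token id 0 doubles as padding, which matches the not_equal(0) length check in the function.

import tensorflow as tf

content = tf.constant(['the quick fox', 'jumps over'])
vocab_table = tf.contrib.lookup.index_table_from_tensor(
    tf.constant(['<pad>', 'the', 'quick', 'fox', 'jumps', 'over']),
    default_value=0)  # unknown tokens also map to the padding id here
ids = tf.constant([0, 1])

content_ids, content_len = multiple_content_lookup(content, vocab_table, ids)

with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run([content_ids, content_len]))
    # Under this setup: [[1, 2, 3], [4, 5, 0]] and [3, 2]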
Example 5: entity_content_embedding_lookup
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def entity_content_embedding_lookup(entities, content, content_len, vocab_table,
                                    word_embedding, str_pad, name=None):
    """Look up entity word embeddings given a flattened 1-D entity id list and a content lookup table.

    :param entities: must be a 1-D entity id vector
    :param content: 1-D string tensor; each element is a space-separated token string for one entity
    :param content_len: 1-D int tensor of per-entity content lengths
    :param vocab_table: lookup table mapping token strings to integer ids
    :param word_embedding: 2-D word embedding matrix
    :param str_pad: padding string used as the default value when densifying the content
    :param name: optional name for the name scope
    :return: entity word embeddings and the corresponding content lengths
    """
    with tf.device('/cpu:0'):
        with tf.name_scope(name, 'entity_content_lookup',
                           [entities, content, content_len, vocab_table, word_embedding]):
            ent_content = tf.string_split(
                tf.nn.embedding_lookup(content, entities, name='ent_content'), delimiter=' ')
            content_len = tf.nn.embedding_lookup(content_len, entities, name='ent_content_len')
            ent_content_dense = tf.sparse_tensor_to_dense(ent_content,
                                                          default_value=str_pad,
                                                          name='ent_content_dense')
            ent_embedding = tf.nn.embedding_lookup(word_embedding,
                                                   vocab_table.lookup(ent_content_dense,
                                                                      name='ent_content_ids'))
            return ent_embedding, content_len
Example 6: _reshape_instance_masks
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def _reshape_instance_masks(self, keys_to_tensors):
    """Reshape instance segmentation masks.

    The instance segmentation masks are reshaped to [num_instances, height,
    width].

    Args:
        keys_to_tensors: a dictionary from keys to tensors.

    Returns:
        A 3-D float tensor of shape [num_instances, height, width] with values
        in {0, 1}.
    """
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    to_shape = tf.cast(tf.stack([-1, height, width]), tf.int32)
    masks = keys_to_tensors['image/object/mask']
    if isinstance(masks, tf.SparseTensor):
        masks = tf.sparse_tensor_to_dense(masks)
    masks = tf.reshape(tf.to_float(tf.greater(masks, 0.0)), to_shape)
    return tf.cast(masks, tf.float32)
Example 7: to_dense
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def to_dense(tensor):
    """Converts a sparse tensor into a dense tensor and returns it.

    # Arguments
        tensor: A tensor instance (potentially sparse).

    # Returns
        A dense tensor.

    # Examples
    ```python
        >>> from keras import backend as K
        >>> b = K.placeholder((2, 2), sparse=True)
        >>> print(K.is_sparse(b))
        True
        >>> c = K.to_dense(b)
        >>> print(K.is_sparse(c))
        False
    ```
    """
    if is_sparse(tensor):
        return tf.sparse_tensor_to_dense(tensor)
    else:
        return tensor
Example 8: testCwiseDivAndMul
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
# Note: this snippet comes from a TensorFlow test class; _sparsify and self._check
# are helpers defined elsewhere in the test file, and sparse_ops refers to
# tensorflow.python.ops.sparse_ops.
def testCwiseDivAndMul(self):
    np.random.seed(1618)
    sp_shapes = [(10, 10, 10), (5, 5), (1618,), (3, 3, 7)]
    dense_shapes = [(10, 10, 1), (5, 5), (1,), (1, 7)]
    with self.test_session(use_gpu=False):
        for dtype in [np.float32, np.float64, np.int32, np.int64]:
            for sp_shape, dense_shape in zip(sp_shapes, dense_shapes):
                sp_vals_np = np.random.rand(*sp_shape).astype(dtype) + 1
                dense_vals_np = np.random.rand(*dense_shape).astype(dtype) + 1
                sp_t, unused_nnz = _sparsify(sp_vals_np, thresh=1.5)
                sp_t_densified = sparse_ops.sparse_tensor_to_dense(sp_t).eval()
                dense_t = tf.constant(dense_vals_np)
                self._check(sp_t / dense_t, sp_t_densified / dense_vals_np, sp_t)
                # Check commutative.
                self._check(sp_t * dense_t, sp_t_densified * dense_vals_np, sp_t)
                self._check(dense_t * sp_t, sp_t_densified * dense_vals_np, sp_t)
                if dtype in [np.int32, np.int64]:
                    res = sp_t / dense_t  # should invoke "__truediv__"
                    self.assertEqual(res.values.eval().dtype, np.float64)
Example 9: testRandom
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
# Note: as in the previous example, _sparsify is a helper defined elsewhere in the test file.
def testRandom(self):
    np.random.seed(1618)
    shapes = [(13,), (6, 8), (1, 7, 1)]
    for shape in shapes:
        for dtype in [np.int32, np.int64, np.float16, np.float32, np.float64]:
            a_np = np.random.randn(*shape).astype(dtype)
            b_np = np.random.randn(*shape).astype(dtype)
            sp_a, unused_a_nnz = _sparsify(a_np, thresh=-.5)
            sp_b, unused_b_nnz = _sparsify(b_np, thresh=-.5)
            with self.test_session(use_gpu=False):
                maximum_tf = tf.sparse_maximum(sp_a, sp_b)
                maximum_tf_densified = tf.sparse_tensor_to_dense(maximum_tf).eval()
                minimum_tf = tf.sparse_minimum(sp_a, sp_b)
                minimum_tf_densified = tf.sparse_tensor_to_dense(minimum_tf).eval()
                a_densified = tf.sparse_tensor_to_dense(sp_a).eval()
                b_densified = tf.sparse_tensor_to_dense(sp_b).eval()
                self.assertAllEqual(
                    np.maximum(a_densified, b_densified), maximum_tf_densified)
                self.assertAllEqual(
                    np.minimum(a_densified, b_densified), minimum_tf_densified)
Example 10: get_label
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def get_label(self, text, null_character=u'\u2591'):
    """Returns the ids of the corresponding text.

    Args:
        text: a tensor with shape [batch_size, lexicon_size] and type string
        null_character: a unicode character used to replace the '<null>'
            character. The default value is a light shade block '░'.
    """
    batch_size = text.shape[0].value
    lexicon_size = text.shape[1].value
    text = tf.reshape(text, [-1])
    sp_text = tf.string_split(text, delimiter='')
    sp_text = tf.sparse_reset_shape(sp_text, [batch_size * lexicon_size,
                                              self.max_sequence_length])
    sp_text = tf.sparse_tensor_to_dense(sp_text, default_value=null_character)
    ids = self.invert_table.lookup(sp_text)
    ids = tf.reshape(ids, [batch_size, lexicon_size, self.max_sequence_length])
    return tf.to_int32(ids)
Example 11: recon_wav_file
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def recon_wav_file(self, wav_files, txt_labels):
    self.audio_features, self.audio_features_len, text_vector, text_vector_len = utils.get_audio_mfcc_features(
        None,
        wav_files,
        self.hyparam.n_input,
        self.hyparam.n_context,
        self.word_num_map,
        txt_labels,
        specgram_type=self.hyparam.specgram_type)
    self.sparse_labels = utils.sparse_tuple_from(text_vector)
    prob, d, train_ler = self.sess.run(
        [self.prob, self.decoded[0], self.label_err],
        feed_dict=self.get_feed_dict(dropout=1.0))
    if self.hyparam.use_lm_decoder:
        result_transcripts = self.lm_decode(prob)
    else:
        dense_decoded = tf.sparse_tensor_to_dense(d, default_value=-1).eval(session=self.sess)
        result_transcripts = utils.trans_array_to_text_ch(dense_decoded[0], self.words).encode('utf-8')
    # print "Transcript: ", result_transcripts
    return result_transcripts
    # self.sess.close()
Example 12: process_input
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def process_input(self, session, inputs, input_seq_lengths, run_options=None, run_metadata=None):
    """
    Returns:
        The next characters, as a dense array of predicted label ids.
    """
    input_feed = {self.inputs_ph: np.array(inputs),
                  self.input_seq_lengths_ph: np.array(input_seq_lengths)}
    if (self.input_keep_prob_ph is not None) and (self.output_keep_prob_ph is not None):
        input_feed[self.input_keep_prob_ph] = 1.0
        input_feed[self.output_keep_prob_ph] = 1.0
    output_feed = [self.prediction]
    outputs = session.run(output_feed, input_feed, options=run_options, run_metadata=run_metadata)
    predictions = session.run(tf.sparse_tensor_to_dense(outputs[0], default_value=self.num_labels,
                                                        validate_indices=True),
                              options=run_options, run_metadata=run_metadata)
    return predictions
Example 13: process_input
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def process_input(self, session, inputs, input_seq_lengths, run_options=None, run_metadata=None):
    """
    Returns:
        Output vector (the densified predictions).
    """
    input_feed = {self.inputs_ph: np.array(inputs),
                  self.input_seq_lengths_ph: np.array(input_seq_lengths)}
    if (self.input_keep_prob_ph is not None) and (self.output_keep_prob_ph is not None):
        input_feed[self.input_keep_prob_ph] = 1.0
        input_feed[self.output_keep_prob_ph] = 1.0
    output_feed = [self.prediction]
    outputs = session.run(output_feed, input_feed, options=run_options, run_metadata=run_metadata)
    predictions = session.run(tf.sparse_tensor_to_dense(outputs[0], default_value=self.num_labels,
                                                        validate_indices=True),
                              options=run_options, run_metadata=run_metadata)
    return predictions
Example 14: parse_example_batch
# Required module import: import tensorflow [as alias]
# Or: from tensorflow import sparse_tensor_to_dense [as alias]
def parse_example_batch(serialized):
    """Parses a batch of tf.Example protos.

    Args:
        serialized: A 1-D string Tensor; a batch of serialized tf.Example protos.

    Returns:
        encode: A SentenceBatch of encode sentences.
        decode_pre: A SentenceBatch of "previous" sentences to decode.
        decode_post: A SentenceBatch of "post" sentences to decode.
    """
    features = tf.parse_example(
        serialized,
        features={
            "encode": tf.VarLenFeature(dtype=tf.int64),
            "decode_pre": tf.VarLenFeature(dtype=tf.int64),
            "decode_post": tf.VarLenFeature(dtype=tf.int64),
        })

    def _sparse_to_batch(sparse):
        ids = tf.sparse_tensor_to_dense(sparse)  # Padding with zeroes.
        mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape,
                                  tf.ones_like(sparse.values, dtype=tf.int32))
        return SentenceBatch(ids=ids, mask=mask)

    output_names = ("encode", "decode_pre", "decode_post")
    return tuple(_sparse_to_batch(features[x]) for x in output_names)
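For readers trying the last example standalone, here is a minimal sketch of the padding and mask behaviour of _sparse_to_batch on a toy sparse tensor, assuming TensorFlow 1.x and assuming SentenceBatch is a simple namedtuple with ids and mask fields (it is not defined in the snippet above).

import collections
import tensorflow as tf

SentenceBatch = collections.namedtuple("SentenceBatch", ("ids", "mask"))

# Two variable-length token-id sequences: [3, 5] and [7].
sparse = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
                         values=tf.constant([3, 5, 7], dtype=tf.int64),
                         dense_shape=[2, 2])

ids = tf.sparse_tensor_to_dense(sparse)  # zero-padded ids
mask = tf.sparse_to_dense(sparse.indices, sparse.dense_shape,
                          tf.ones_like(sparse.values, dtype=tf.int32))
batch = SentenceBatch(ids=ids, mask=mask)

with tf.Session() as sess:
    print(sess.run(batch.ids))   # [[3 5] [7 0]]
    print(sess.run(batch.mask))  # [[1 1] [1 0]]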