This article collects typical usage examples of the tensorflow.sparse_tensor_to_dense function in Python. If you are wondering what exactly sparse_tensor_to_dense does, how to call it, or where to find real-world examples of it, the curated code samples below should help.
Below are 15 code examples of the sparse_tensor_to_dense function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
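Before the examples, here is a minimal sketch of the basic call (assuming TensorFlow 1.x, where tf.sparse_tensor_to_dense is available): a SparseTensor is converted to an ordinary dense Tensor, with positions not listed in its indices filled by default_value.

import tensorflow as tf

# A 3x4 sparse tensor with two non-zero entries.
st = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], dense_shape=[3, 4])
# Positions not listed in `indices` are filled with `default_value`.
dense = tf.sparse_tensor_to_dense(st, default_value=0)

with tf.Session() as sess:
    print(sess.run(dense))
    # [[1 0 0 0]
    #  [0 0 2 0]
    #  [0 0 0 0]]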
Example 1: testPrintSparseTensorPassthrough
def testPrintSparseTensorPassthrough(self):
    a = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
    b = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2], shape=[3, 4])
    a = tf.contrib.framework.print_op(a)
    with self.test_session():
        self.assertAllEqual(tf.sparse_tensor_to_dense(a).eval(),
                            tf.sparse_tensor_to_dense(b).eval())
Example 2: parse
def parse(serialized):
    """Parse a serialized string into tensors.

    Arguments:
      serialized: a serialized `tf.train.SequenceExample` (like the one returned
        from the `encode()` method).

    Returns:
      a tuple of 4 tensors:
        `words`: 1D tensor of shape [sentence_length].
        `sentence_length`: a 0D tensor (i.e. scalar) representing the sentence length.
        `formula`: 1D tensor of shape [formula_length].
        `formula_length`: a 0D tensor (i.e. scalar) representing the formula length.
    """
    features = {
        SENTENCE_LENGTH_KEY: tf.FixedLenFeature([], tf.int64),
        FORMULA_LENGTH_KEY: tf.FixedLenFeature([], tf.int64),
        WORDS_KEY: tf.VarLenFeature(tf.int64),
        FORMULA_KEY: tf.VarLenFeature(tf.int64),
    }
    parsed = tf.parse_single_example(
        serialized=serialized,
        features=features)
    sentence_length = parsed[SENTENCE_LENGTH_KEY]
    formula_length = parsed[FORMULA_LENGTH_KEY]
    words = tf.sparse_tensor_to_dense(parsed[WORDS_KEY])
    formula = tf.sparse_tensor_to_dense(parsed[FORMULA_KEY])
    return words, sentence_length, formula, formula_length
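As a hedged usage sketch (TensorFlow 1.x; the file name 'train.tfrecord' is a placeholder), parse could be mapped over a tf.data pipeline so that each record yields dense words and formula tensors:

import tensorflow as tf

# Hypothetical input file; any TFRecord of serialized examples containing the
# SENTENCE_LENGTH_KEY / FORMULA_LENGTH_KEY / WORDS_KEY / FORMULA_KEY features would do.
dataset = tf.data.TFRecordDataset('train.tfrecord').map(parse)
iterator = dataset.make_one_shot_iterator()
words, sentence_length, formula, formula_length = iterator.get_next()

with tf.Session() as sess:
    w, sl, f, fl = sess.run([words, sentence_length, formula, formula_length])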
Example 3: build_model
def build_model(self):
    dense_masker = tf.sparse_tensor_to_dense(self.mask)
    with tf.name_scope('encoding'):
        encoding = tf.add(tf.sparse_tensor_dense_matmul(self.X, self.W), self.b, name='raw_values')
        encoded_values = self.enc_func(encoding, name='encoded_values')
    with tf.name_scope('decoding'):
        decoding = tf.nn.xw_plus_b(encoded_values, self.W_prime, self.b_prime)
        decoded_values = self.dec_func(decoding, name='decoded_values')
        masked_decoded_values = tf.multiply(dense_masker, decoded_values)
    with tf.name_scope('training_process'):
        diff = tf.squared_difference(tf.sparse_tensor_to_dense(self.Y, default_value=0), decoded_values)
        error = tf.reduce_sum(tf.multiply(dense_masker, diff))
        reg = 0
        for param in self.params.items():
            reg += tf.nn.l2_loss(param[1]) * self.lambda_w
        loss = error + reg
    model_params = [p for p in self.params.values()]
    train_step = self._optimize(loss, model_params)
    tf.summary.scalar('error', error)
    tf.summary.scalar('loss', loss)
    for param in self.params.items():
        tf.summary.histogram(param[0], param[1])
    # tf.summary.histogram('predictions', decoded_values)
    merged_summary = tf.summary.merge_all()
    return encoded_values, decoded_values, masked_decoded_values, error, loss, train_step, merged_summary
Example 4: accuracy_instance
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], nb_classes=5, nb_samples_per_class=10, batch_size=1):
    targets = tf.cast(targets, predictions.dtype)
    accuracy = tf.constant(value=0, shape=(batch_size, nb_samples_per_class), dtype=tf.float32)
    indices = tf.constant(value=0, shape=(batch_size, nb_classes + 1), dtype=tf.float32)

    def step_((accuracy, indices), (p, t)):  # Python 2 tuple-parameter syntax, as in the original code
        """with tf.variable_scope("Metric_step_var", reuse=True):
            accuracy = tf.get_variable(name="accuracy", shape=(batch_size, nb_samples_per_class),
                                       initializer=tf.constant_initializer(0), dtype=tf.float32)
            indices = tf.get_variable(name="indices", shape=(batch_size, nb_classes + 1),
                                      initializer=tf.constant_initializer(0), dtype=tf.float32)"""
        p = tf.cast(p, tf.int32)
        t = tf.cast(t, tf.int32)
        ## Accuracy update
        batch_range = tf.cast(tf.range(0, batch_size), dtype=tf.int32)
        gather = tf.cast(tf.gather_nd(indices, tf.stack([tf.range(0, p.get_shape().as_list()[0]), t], axis=1)), tf.int32)
        index = tf.cast(tf.stack([batch_range, gather], axis=1), dtype=tf.int64)
        val = tf.cast(tf.equal(p, t), tf.float32)
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(accuracy.get_shape().as_list(), tf.int64))
        accuracy = accuracy + tf.sparse_tensor_to_dense(delta)
        ## Index update
        index = tf.cast(tf.stack([batch_range, t], axis=1), dtype=tf.int64)
        val = tf.constant(1.0, shape=[batch_size])
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(indices.get_shape().as_list(), dtype=tf.int64))
        indices = indices + tf.sparse_tensor_to_dense(delta)
        return [accuracy, indices]
Example 5: _parse_example
def _parse_example(serialized_example):
    """Return inputs and targets Tensors from a serialized tf.Example."""
    data_fields = {
        "inputs": tf.VarLenFeature(tf.int64),
        "targets": tf.VarLenFeature(tf.int64)
    }
    parsed = tf.parse_single_example(serialized_example, data_fields)
    inputs = tf.sparse_tensor_to_dense(parsed["inputs"])
    targets = tf.sparse_tensor_to_dense(parsed["targets"])
    return inputs, targets
Example 6: input_fn
def input_fn():
    features = learn.read_batch_features(
        filename, BATCH_SIZE, feature_info,
        reader=tf.TFRecordReader)
    target = features.pop('answer_ids')
    target = utils.resize_axis(tf.sparse_tensor_to_dense(target), 1, 1)
    return features, target
Example 7: to_matrix
def to_matrix(sparse_indices, values, dense_shape):
    sparse_tensor = tf.sparse_reorder(tf.SparseTensor(
        indices=sparse_indices,
        values=tf.ones(sparse_indices.get_shape().as_list()[0]),
        # values=tf.reshape(values, [-1]),
        dense_shape=dense_shape))
    return tf.sparse_tensor_to_dense(sparse_tensor)
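A hedged usage sketch (TensorFlow 1.x; the indices and shape below are made-up values). Note that to_matrix ignores its values argument and fills every listed position with 1.0:

import tensorflow as tf

# Made-up indices: ones at positions (0, 1) and (2, 0) of a 3x3 matrix.
idx = tf.constant([[0, 1], [2, 0]], dtype=tf.int64)
shape = tf.constant([3, 3], dtype=tf.int64)
matrix = to_matrix(idx, values=None, dense_shape=shape)  # values is unused above

with tf.Session() as sess:
    print(sess.run(matrix))  # ones at (0, 1) and (2, 0), zeros elsewhere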
Example 8: _slice_with_actions
def _slice_with_actions(embeddings, actions):
    """Slice a Tensor.

    Take embeddings of the form [batch_size, num_actions, embed_dim]
    and actions of the form [batch_size, 1], and return the sliced embeddings
    like embeddings[:, actions, :].

    Args:
      embeddings: Tensor of embeddings to index.
      actions: int Tensor to use as index into embeddings.

    Returns:
      Tensor of embeddings indexed by actions.
    """
    shape = tuple(t.value for t in embeddings.get_shape())
    batch_size, num_actions = shape[0], shape[1]
    # Values are the 'values' in a sparse tensor we will be setting
    act_indx = tf.cast(actions, tf.int64)[:, None]
    values = tf.reshape(tf.cast(tf.ones(tf.shape(actions)), tf.bool), [-1])
    # Create a range for each index into the batch
    act_range = tf.range(0, batch_size, dtype=tf.int64)[:, None]
    # Combine this into coordinates with the action indices
    indices = tf.concat([act_range, act_indx], 1)
    actions_mask = tf.SparseTensor(indices, values, [batch_size, num_actions])
    actions_mask = tf.stop_gradient(
        tf.sparse_tensor_to_dense(actions_mask, default_value=False))
    sliced_emb = tf.boolean_mask(embeddings, actions_mask)
    return sliced_emb
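A hedged usage sketch (TensorFlow 1.x, hypothetical shapes). The action indices are passed here as a rank-1 int vector so that concatenating act_range and act_indx yields [batch_size, 2] coordinates for the sparse mask:

import tensorflow as tf

# Hypothetical shapes: batch_size=2, num_actions=3, embed_dim=4.
embeddings = tf.reshape(tf.range(24, dtype=tf.float32), [2, 3, 4])
actions = tf.constant([1, 2], dtype=tf.int32)  # one action index per batch element
sliced = _slice_with_actions(embeddings, actions)  # shape [2, 4]

with tf.Session() as sess:
    print(sess.run(sliced))  # rows embeddings[0, 1, :] and embeddings[1, 2, :]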
Example 9: test
def test(self):
    index = 0
    next_idx = 20
    for index in range(10):
        next_idx, self.audio_features, self.audio_features_len, self.sparse_labels, wav_files = utils.next_batch(
            next_idx,
            1,
            n_input,
            n_context,
            self.text_labels,
            self.wav_files,
            self.word_num_map)
        print('Loading audio file: ', wav_files[0])
        print('Starting to recognize the audio data......')
        d, train_ler = self.sess.run([self.decoded[0], self.label_err], feed_dict=self.get_feed_dict(dropout=1.0))
        dense_decoded = tf.sparse_tensor_to_dense(d, default_value=-1).eval(session=self.sess)
        dense_labels = utils.trans_tuple_to_texts_ch(self.sparse_labels, self.words)
        for orig, decoded_array in zip(dense_labels, dense_decoded):
            # convert to string
            decoded_str = utils.trans_array_to_text_ch(decoded_array, self.words)
            print('Original transcript: {}'.format(orig))
            print('Recognized text: {}'.format(decoded_str))
        break
    self.sess.close()
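For context, the self.decoded tensor in this example typically comes from a CTC decoder; a minimal, hedged sketch of that pattern (TensorFlow 1.x, with placeholder logits and sequence lengths, whose sizes here are assumptions):

import tensorflow as tf

# Placeholder logits [max_time, batch_size, num_classes] and sequence lengths.
logits = tf.placeholder(tf.float32, [50, 1, 29])
seq_len = tf.placeholder(tf.int32, [1])
decoded, _ = tf.nn.ctc_beam_search_decoder(logits, seq_len)
# The decoder returns a list of SparseTensors; densify the best path and
# mark padding with -1, as in the example above.
dense_decoded = tf.sparse_tensor_to_dense(decoded[0], default_value=-1)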
Example 10: __init__
def __init__(self, config):
    paths, meta = Input._collect(config.path)
    self.dimension_count = meta['dimension_count']
    self.sample_count = meta['sample_count']
    self.batch_size = config.get('batch_size', 1)
    if self.sample_count % self.batch_size > 0:
        raise Exception(
            ('expected the number of samples ({}) to be ' +
             'divisible by the batch size ({})').format(self.sample_count,
                                                        self.batch_size))
    with tf.variable_scope('state'):
        self.state = State()
    with tf.variable_scope('source'):
        paths = tf.Variable(paths, name='paths', dtype=tf.string,
                            trainable=False)
        queue = tf.FIFOQueue(meta['path_count'], [tf.string])
        enqueue = queue.enqueue_many([tf.random_shuffle(paths)])
        tf.train.add_queue_runner(tf.train.QueueRunner(queue, [enqueue]))
        _, record = tf.TFRecordReader().read(queue)
    with tf.variable_scope('x'):
        features = tf.parse_single_example(record, {
            'data': tf.VarLenFeature(tf.float32),
        })
        data = tf.sparse_tensor_to_dense(features['data'])
        if self.batch_size == 1:
            self.x = tf.reshape(data, [1, -1, self.dimension_count])
        else:
            x = tf.reshape(data, [-1, self.dimension_count])
            _, outputs = tf.contrib.training.bucket_by_sequence_length(
                tf.shape(x)[0], [x], self.batch_size, config.buckets,
                dynamic_pad=True)
            self.x = outputs[0]
    with tf.variable_scope('y'):
        self.y = tf.pad(self.x[:, 1:, :], [[0, 0], [0, 1], [0, 0]])
Example 11: __init__
def __init__(self, args):
    super(seqMLP, self).__init__()
    self.args = args
    self.batch_size = args.batch_size
    self.input_data = tf.placeholder(tf.float32, [self.args.batch_size, self.args.sentence_length, self.args.word_dim], name='inputdata')
    self.output_data = tf.sparse_placeholder(tf.float32, name='outputdata')  # [None, 114]
    self.dense_outputdata = tf.sparse_tensor_to_dense(self.output_data)
    self.keep_prob = tf.placeholder(tf.float32, name='keep_prob_NER')
    self.entMentIndex = tf.placeholder(tf.int32, [None, 5], name='ent_mention_index')
    self.entCtxLeftIndex = tf.placeholder(tf.int32, [None, 10], name='ent_ctxleft_index')
    self.entCtxRightIndex = tf.placeholder(tf.int32, [None, 10], name='ent_ctxright_index')
    self.pos_f1 = tf.placeholder(tf.float32, [None, 5, 1])
    self.pos_f2 = tf.placeholder(tf.float32, [None, 10, 1])
    self.pos_f3 = tf.placeholder(tf.float32, [None, 10, 1])
    self.figerHier = np.asarray(cPickle.load(open('data/figer/figerhierarchical.p', 'rb')), np.float32)  # add the hierarchy features
    self.layers = {}
    self.layers['fullyConnect'] = layers_lib.FullyConnection(self.args.class_size)
    used = tf.sign(tf.reduce_max(tf.abs(self.input_data), reduction_indices=2))
    self.length = tf.cast(tf.reduce_sum(used, reduction_indices=1), tf.int32)
    with tf.device('/gpu:0'):
        self.prediction, self.loss_lm = self.cl_loss_from_embedding(self.input_data)
        print 'self.loss_lm:', self.loss_lm
        _, self.adv_loss = self.adversarial_loss()
        print 'self.adv_loss:', self.adv_loss
        self.loss = tf.add(self.loss_lm, self.adv_loss)
Example 12: _decode_png_instance_masks
def _decode_png_instance_masks(self, keys_to_tensors):
    """Decode PNG instance segmentation masks and stack into dense tensor.

    The instance segmentation masks are reshaped to [num_instances, height,
    width].

    Args:
      keys_to_tensors: a dictionary from keys to tensors.

    Returns:
      A 3-D float tensor of shape [num_instances, height, width] with values
      in {0, 1}.
    """
    def decode_png_mask(image_buffer):
        image = tf.squeeze(
            tf.image.decode_image(image_buffer, channels=1), axis=2)
        image.set_shape([None, None])
        image = tf.to_float(tf.greater(image, 0))
        return image

    png_masks = keys_to_tensors['image/object/mask']
    height = keys_to_tensors['image/height']
    width = keys_to_tensors['image/width']
    if isinstance(png_masks, tf.SparseTensor):
        png_masks = tf.sparse_tensor_to_dense(png_masks, default_value='')
    return tf.cond(
        tf.greater(tf.size(png_masks), 0),
        lambda: tf.map_fn(decode_png_mask, png_masks, dtype=tf.float32),
        lambda: tf.zeros(tf.to_int32(tf.stack([0, height, width]))))
Example 13: read_data_int64
def read_data_int64(input_fname):
    import pdb
    with tictoc():
        input_fname_queue = tf.train.string_input_producer([input_fname], num_epochs=1)
        reader = tf.TFRecordReader()
        _, serialized_example = reader.read(input_fname_queue)
        features = {'bit_features': tf.VarLenFeature(tf.int64)}
        parsed_example = tf.parse_single_example(serialized_example, features)
        bit_features = parsed_example['bit_features']
        bit_features = tf.sparse_tensor_to_dense(bit_features)
        bit_features = tf.reshape(bit_features, [-1, 62])

        with tf.Session() as sess:
            tf.initialize_all_variables().run()
            tf.initialize_local_variables().run()
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)
            try:
                i = 0
                while not coord.should_stop():
                    x = bit_features.eval()
                    if i % 10000 == 0: print("substance {}".format(i))
                    i += 1
            except tf.errors.OutOfRangeError:
                pass
            finally:
                coord.request_stop()
                coord.join(threads)
Example 14: tensors_to_item
def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a concatenated list of bboxes.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [time, num_boxes, 4] tensor of bounding box coordinates, in order
      [y_min, x_min, y_max, x_max]. Whether the tensor is a SparseTensor
      or a dense Tensor is determined by the return_dense parameter. Empty
      positions in the sparse tensor are filled with -1.0 values.
    """
    sides = []
    for key in self._full_keys:
        value = keys_to_tensors[key]
        expanded_dims = tf.concat(
            [tf.to_int64(tf.shape(value)),
             tf.constant([1], dtype=tf.int64)], 0)
        side = tf.sparse_reshape(value, expanded_dims)
        sides.append(side)
    bounding_boxes = tf.sparse_concat(2, sides)
    if self._return_dense:
        bounding_boxes = tf.sparse_tensor_to_dense(
            bounding_boxes, default_value=self._default_value)
    return bounding_boxes
Example 15: unpool_layer2x2
def unpool_layer2x2(self, x, raveled_argmax, out_shape):
    argmax = self.unravel_argmax(raveled_argmax, tf.to_int64(out_shape))
    output = tf.zeros([out_shape[1], out_shape[2], out_shape[3]])

    height = tf.shape(output)[0]
    width = tf.shape(output)[1]
    channels = tf.shape(output)[2]

    t1 = tf.to_int64(tf.range(channels))
    t1 = tf.tile(t1, [((width + 1) // 2) * ((height + 1) // 2)])
    t1 = tf.reshape(t1, [-1, channels])
    t1 = tf.transpose(t1, perm=[1, 0])
    t1 = tf.reshape(t1, [channels, (height + 1) // 2, (width + 1) // 2, 1])

    t2 = tf.squeeze(argmax)
    t2 = tf.pack((t2[0], t2[1]), axis=0)
    t2 = tf.transpose(t2, perm=[3, 1, 2, 0])

    t = tf.concat(3, [t2, t1])
    indices = tf.reshape(t, [((height + 1) // 2) * ((width + 1) // 2) * channels, 3])

    x1 = tf.squeeze(x)
    x1 = tf.reshape(x1, [-1, channels])
    x1 = tf.transpose(x1, perm=[1, 0])
    values = tf.reshape(x1, [-1])

    delta = tf.SparseTensor(indices, values, tf.to_int64(tf.shape(output)))
    return tf.expand_dims(tf.sparse_tensor_to_dense(tf.sparse_reorder(delta)), 0)