This page collects typical usage examples of the Python function tensorflow.size. If you are wondering what tf.size does, how to call it, or where it is useful, the curated examples below should help.
Fifteen code examples of the size function are shown, sorted by popularity by default.
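Before the examples, a minimal sketch of the basics (all snippets on this page assume import tensorflow as tf under the TF 1.x API): tf.size returns the total number of elements in a tensor as a scalar, regardless of the tensor's rank.

import tensorflow as tf

x = tf.constant([[1, 2, 3], [4, 5, 6]])  # shape (2, 3)
n = tf.size(x)                           # scalar int32 tensor holding 6
n64 = tf.size(x, out_type=tf.int64)      # same count, as int64

with tf.Session() as sess:
    print(sess.run([n, n64]))            # [6, 6]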
Example 1: disjunction_of_literals
def disjunction_of_literals(literals, label="no_label"):
    # default_tnorm and default_aggregator are module-level settings in the source
    list_of_literal_tensors = [lit.tensor for lit in literals]
    literals_tensor = tf.concat(list_of_literal_tensors, 1)
    if default_tnorm == "product":
        result = 1.0 - tf.reduce_prod(1.0 - literals_tensor, 1, keep_dims=True)
    if default_tnorm == "yager2":
        result = tf.minimum(1.0, tf.sqrt(tf.reduce_sum(tf.square(literals_tensor), 1, keep_dims=True)))
    if default_tnorm == "luk":
        print("data aggregator is lukas")
        result = tf.minimum(1.0, tf.reduce_sum(literals_tensor, 1, keep_dims=True))
        PR(result)  # PR: debug-print helper defined elsewhere in the source module
    if default_tnorm == "goedel":
        result = tf.reduce_max(literals_tensor, 1, keep_dims=True, name=label)
    if default_aggregator == "product":
        return tf.reduce_prod(result, keep_dims=True)
    if default_aggregator == "mean":
        print("data aggregator is mean")
        return tf.reduce_mean(result, keep_dims=True, name=label)
    if default_aggregator == "gmean":
        # geometric mean: exp(sum(log x) / n), with n = tf.size(result)
        return tf.exp(tf.multiply(tf.reduce_sum(tf.log(result), keep_dims=True),
                                  tf.reciprocal(tf.to_float(tf.size(result)))), name=label)
    if default_aggregator == "hmean":
        print("data aggregator is hmean")
        # harmonic mean: n / sum(1 / x)
        return tf.div(tf.to_float(tf.size(result)), tf.reduce_sum(tf.reciprocal(result), keep_dims=True))
    if default_aggregator == "min":
        print("data aggregator is min")
        return tf.reduce_min(result, keep_dims=True, name=label)
    if default_aggregator == "qmean":
        print("data aggregator is qmean")
        return tf.sqrt(tf.reduce_mean(tf.square(result), keep_dims=True), name=label)
    if default_aggregator == "cmean":
        print("data aggregator is cmean")
        return tf.pow(tf.reduce_mean(tf.pow(result, 3), keep_dims=True), tf.reciprocal(tf.to_float(3)), name=label)
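As a side note, the "gmean" branch above is just the geometric mean written as exp(sum(log x) / n), with tf.size supplying n. A standalone sketch (hypothetical values) that checks it against a direct computation:

import numpy as np
import tensorflow as tf

x = tf.constant([0.5, 0.8, 0.9])
n = tf.to_float(tf.size(x))                         # n = 3
gmean = tf.exp(tf.reduce_sum(tf.log(x)) / n)        # exp(sum(log x) / n)

with tf.Session() as sess:
    print(sess.run(gmean))                          # ~0.7114
    print(np.prod([0.5, 0.8, 0.9]) ** (1.0 / 3.0))  # same value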
Example 2: init
def init(self):
    # init
    self.global_step = global_step = tf.Variable(0, trainable=False, name='global_step')
    self.learning_rate = learning_rate = tf.train.exponential_decay(1e-2, global_step, 500, 0.95, staircase=True)

    # Load vocabularies
    src_table = tf.contrib.lookup.index_table_from_file('./iwslt15/vocab.en', default_value=0)
    tgt_table = tf.contrib.lookup.index_table_from_file('./iwslt15/vocab.vi', default_value=0)
    # src_table_size = src_table.size()
    # tgt_table_size = tgt_table.size()
    src_table_size = 17191
    tgt_table_size = 7709
    src_eos_id = tf.cast(src_table.lookup(tf.constant('</s>')), tf.int64)
    self.tgt_eos_id = tgt_eos_id = tf.cast(tgt_table.lookup(tf.constant('</s>')), tf.int64)
    self.tgt_sos_id = tgt_sos_id = tf.cast(tgt_table.lookup(tf.constant('<s>')), tf.int64)

    # File placeholders
    src_files = tf.placeholder(tf.string, shape=[None])
    tgt_files = tf.placeholder(tf.string, shape=[None])

    # Read data
    src_dataset = tf.contrib.data.TextLineDataset(src_files)
    tgt_dataset = tf.contrib.data.TextLineDataset(tgt_files)

    # Convert data to word indices; tf.size(words) yields each sequence's length
    src_dataset = src_dataset.map(lambda string: tf.concat([['<s>'], tf.string_split([string]).values, ['</s>']], 0))
    src_dataset = src_dataset.map(lambda words: (words, tf.size(words)))
    src_dataset = src_dataset.map(lambda words, size: (src_table.lookup(words), size))
    tgt_dataset = tgt_dataset.map(lambda string: tf.concat([['<s>'], tf.string_split([string]).values, ['</s>']], 0))
    tgt_dataset = tgt_dataset.map(lambda words: (words, tf.size(words)))
    tgt_dataset = tgt_dataset.map(lambda words, size: (tgt_table.lookup(words), size))

    # Zip source and target
    dataset = tf.contrib.data.Dataset.zip((src_dataset, tgt_dataset))

    # Batch with padding: sentences pad to the longest in the batch, lengths stay scalar
    batched_dataset = dataset.padded_batch(
        self.batch_size,
        padded_shapes=((tf.TensorShape([None]), tf.TensorShape([])),
                       (tf.TensorShape([None]), tf.TensorShape([]))),
        padding_values=((src_eos_id, 0), (tgt_eos_id, 0)))
    batched_iterator = batched_dataset.make_initializable_iterator()
    ((source, source_lengths), (target, target_lengths)) = batched_iterator.get_next()
    self.target = target
    self.target_lengths = target_lengths
    self.source_lengths = source_lengths

    # Load embeddings (vocabulary capped at 100000)
    src_embed = tf.Variable(tf.random_normal([100000, self.embed_vector_size], stddev=0.1))
    self.tgt_embed = tgt_embed = tf.Variable(tf.random_normal([100000, self.embed_vector_size], stddev=0.1))
    self.src_lookup = src_lookup = tf.nn.embedding_lookup(src_embed, source)
    self.tgt_lookup = tgt_lookup = tf.nn.embedding_lookup(tgt_embed, target)

    # Projection layer
    self.projection_layer = projection_layer = layers_core.Dense(tgt_table_size)
    return batched_iterator, src_files, tgt_files
Example 3: style_loss
def style_loss(CNN_structure, const_layers, var_layers, content_segs, style_segs, weight):
    loss_styles = []
    layer_count = float(len(const_layers))
    layer_index = 0

    _, content_seg_height, content_seg_width, _ = content_segs[0].get_shape().as_list()
    _, style_seg_height, style_seg_width, _ = style_segs[0].get_shape().as_list()
    for layer_name in CNN_structure:
        layer_name = layer_name[layer_name.find("/") + 1:]

        # downsampling segmentation
        if "pool" in layer_name:
            content_seg_width, content_seg_height = int(math.ceil(content_seg_width / 2)), int(math.ceil(content_seg_height / 2))
            style_seg_width, style_seg_height = int(math.ceil(style_seg_width / 2)), int(math.ceil(style_seg_height / 2))
            for i in range(len(content_segs)):
                content_segs[i] = tf.image.resize_bilinear(content_segs[i], tf.constant((content_seg_height, content_seg_width)))
                style_segs[i] = tf.image.resize_bilinear(style_segs[i], tf.constant((style_seg_height, style_seg_width)))
        elif "conv" in layer_name:
            for i in range(len(content_segs)):
                # have some differences on border with torch
                content_segs[i] = tf.nn.avg_pool(tf.pad(content_segs[i], [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT"),
                                                 ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')
                style_segs[i] = tf.nn.avg_pool(tf.pad(style_segs[i], [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT"),
                                               ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='VALID')

        if layer_name == var_layers[layer_index].name[var_layers[layer_index].name.find("/") + 1:]:
            print("Setting up style layer: <{}>".format(layer_name))
            const_layer = const_layers[layer_index]
            var_layer = var_layers[layer_index]
            layer_index = layer_index + 1

            layer_style_loss = 0.0
            for content_seg, style_seg in zip(content_segs, style_segs):
                # normalize each Gram matrix by the layer's element count (tf.size) and mask mean
                gram_matrix_const = gram_matrix(tf.multiply(const_layer, style_seg))
                style_mask_mean = tf.reduce_mean(style_seg)
                gram_matrix_const = tf.cond(tf.greater(style_mask_mean, 0.),
                                            lambda: gram_matrix_const / (tf.to_float(tf.size(const_layer)) * style_mask_mean),
                                            lambda: gram_matrix_const)

                gram_matrix_var = gram_matrix(tf.multiply(var_layer, content_seg))
                content_mask_mean = tf.reduce_mean(content_seg)
                gram_matrix_var = tf.cond(tf.greater(content_mask_mean, 0.),
                                          lambda: gram_matrix_var / (tf.to_float(tf.size(var_layer)) * content_mask_mean),
                                          lambda: gram_matrix_var)

                diff_style_sum = tf.reduce_mean(tf.squared_difference(gram_matrix_const, gram_matrix_var)) * content_mask_mean
                layer_style_loss += diff_style_sum

            loss_styles.append(layer_style_loss * weight)
    return loss_styles
Example 4: _compareSize
def _compareSize(self, x, use_gpu=False):
    np_ans = np.asarray(np.size(x))
    with self.test_session(use_gpu=use_gpu):
        tf_ans = tf.size(x)
        result = tf_ans.eval()
        tf_ans_64 = tf.size(x, out_type=tf.int64)
        result_64 = tf_ans_64.eval()
    self.assertAllEqual(np_ans, result)
    self.assertAllEqual(np_ans, result_64)
    self.assertShapeEqual(np_ans, tf_ans)
Example 5: testSparseShape
def testSparseShape(self):
    with self.test_session():
        sp_value = tf.SparseTensorValue(indices=((0, 1), (1, 0)), values=(42, 24), shape=(2, 2))
        self.assertAllEqual((2, 2), tf.shape(sp_value).eval())
        self.assertEqual(4, tf.size(sp_value).eval())
        self.assertEqual(2, tf.rank(sp_value).eval())

        sp = tf.SparseTensor.from_value(sp_value)
        self.assertAllEqual((2, 2), tf.shape(sp).eval())
        self.assertEqual(4, tf.size(sp).eval())
        self.assertEqual(2, tf.rank(sp).eval())
Example 6: testDenseShape
def testDenseShape(self):
    with self.test_session():
        t_value = [[0, 42], [24, 0]]
        self.assertAllEqual((2, 2), tf.shape(t_value).eval())
        self.assertEqual(4, tf.size(t_value).eval())
        self.assertEqual(2, tf.rank(t_value).eval())

        t = tf.constant(t_value)
        self.assertAllEqual((2, 2), tf.shape(t).eval())
        self.assertEqual(4, tf.size(t).eval())
        self.assertEqual(2, tf.rank(t).eval())
Example 7: cross_add
def cross_add(a, b):
    '''
    :param a: 1-D tensor
    :param b: 1-D tensor
    :return: 1-D tensor of all pairwise sums a[i] + b[j]
    '''
    a_len = tf.reshape(tf.size(a), [1])
    b_len = tf.reshape(tf.size(b), [1])
    # tile a to shape [len(a), len(b)], so that aa[i, j] == a[i]
    aa = tf.transpose(tf.reshape(tf.tile(a, b_len), shape=tf.concat([b_len, a_len], 0)))
    # broadcast-add b across the rows, then flatten back to 1-D
    ab_sum = tf.reshape(tf.add(aa, b), shape=tf.multiply(a_len, b_len))
    return ab_sum
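A quick usage sketch for cross_add (using the definition above and the TF 1.x session API): it produces every pairwise sum a[i] + b[j], flattened with a's index varying slowest.

import tensorflow as tf

a = tf.constant([1.0, 2.0])
b = tf.constant([10.0, 20.0, 30.0])

with tf.Session() as sess:
    print(sess.run(cross_add(a, b)))  # [11. 21. 31. 12. 22. 32.]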
Example 8: create_seed
def create_seed(filename,
                sample_rate,
                quantization_channels,
                window_size=WINDOW):
    audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
    audio = audio_reader.trim_silence(audio)
    quantized = mu_law_encode(audio, quantization_channels)
    cut_index = tf.cond(tf.size(quantized) < tf.constant(window_size),
                        lambda: tf.size(quantized),
                        lambda: tf.constant(window_size))
    return quantized[:cut_index]
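Since both branches of the tf.cond above reduce to taking the smaller of two scalars, the same cut index could be written more directly (a sketch with identical semantics):

cut_index = tf.minimum(tf.size(quantized), window_size)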
Example 9: lstm
def lstm(xs, l, size, num_layers, initial_state=None):
    # note: tf.size returns the total element count as a scalar and cannot be
    # indexed, so the batch and vocabulary sizes must come from tf.shape
    batch_size = tf.shape(xs)[0]
    n = tf.shape(xs)[-1]
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(size, forget_bias=0.0)
    # add dropout
    cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * num_layers)
    if initial_state is None:
        initial_state = cell.zero_state(batch_size, dtype=tf.float32)
    inputs = tf.one_hot(xs, n)
    # inputs = [tf.squeeze(input_, [1])
    #           for input_ in tf.split(1, num_steps, inputs)]
    outputs, _ = rnn.rnn(cell, inputs, initial_state=initial_state)
    # state
    return outputs
Example 10: main
def main(argv=None):
    style_paths = FLAGS.STYLE_IMAGES.split(',')
    style_layers = FLAGS.STYLE_LAYERS.split(',')
    content_path = FLAGS.CONTENT_IMAGE
    content_layers = FLAGS.CONTENT_LAYERS.split(',')

    style_features_t = get_style_features(style_paths, style_layers)
    res = get_content_features(content_path, content_layers)
    content_features_t, image_t = res[:-1], res[-1]

    image = tf.constant(image_t)
    random = tf.random_normal(image_t.shape)
    initial = tf.Variable(random if FLAGS.RANDOM_INIT else image)

    net, _ = vgg.net(FLAGS.VGG_PATH, initial)

    # each loss term is normalized by the layer's element count via tf.size
    content_loss = 0
    for content_features, layer in zip(content_features_t, content_layers):
        layer_size = tf.size(content_features)
        content_loss += tf.nn.l2_loss(net[layer] - content_features) / tf.to_float(layer_size)
    content_loss = FLAGS.CONTENT_WEIGHT * content_loss / len(content_layers)

    style_loss = 0
    for style_gram, layer in zip(style_features_t, style_layers):
        layer_size = tf.size(style_gram)
        style_loss += tf.nn.l2_loss(gram(net[layer]) - style_gram) / tf.to_float(layer_size)
        # style_loss += tf.sqrt(tf.reduce_sum(tf.pow(gram(net[layer]) - style_gram, 2)))
    style_loss = FLAGS.STYLE_WEIGHT * style_loss

    tv_loss = FLAGS.TV_WEIGHT * total_variation_loss(initial)
    total_loss = content_loss + style_loss + tv_loss

    train_op = tf.train.AdamOptimizer(FLAGS.LEARNING_RATE).minimize(total_loss)
    output_image = tf.image.encode_png(tf.saturate_cast(tf.squeeze(initial) + reader.mean_pixel, tf.uint8))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        start_time = time.time()
        for step in range(FLAGS.NUM_ITERATIONS):
            _, loss_t, cl, sl = sess.run([train_op, total_loss, content_loss, style_loss])
            elapsed = time.time() - start_time
            start_time = time.time()
            print(step, elapsed, loss_t, cl, sl)
        image_t = sess.run(output_image)
        with open('out.png', 'wb') as f:
            f.write(image_t)
Example 11: get_train
def get_train(train_ph_dict, var_dict, var_ph_dict):
    mid0 = tf.one_hot(train_ph_dict['choice_0'], 9, axis=-1, dtype=tf.float32)
    mid0 = mid0 * get_q(train_ph_dict['state_0'], var_dict)
    mid0 = tf.reduce_sum(mid0, reduction_indices=[1])

    mid1 = get_q(train_ph_dict['state_1'], var_ph_dict)
    mid1 = tf.reduce_max(mid1, reduction_indices=[1])
    mid1 = mid1 * train_ph_dict['cont']
    mid1 = mid1 * tf.constant(TRAIN_BETA)

    # L2 regularizer, averaged over the total number of weights (tf.size)
    l2r = tf.constant(0.0)
    cell_count = tf.constant(0.0)
    for v in var_dict.values():
        l2r = l2r + get_l2(v)
        cell_count = cell_count + tf.to_float(tf.size(v))
    l2r = l2r / cell_count
    l2r = l2r / tf.constant(ELEMENT_L2_FACTOR * ELEMENT_L2_FACTOR)
    l2r = l2r * tf.constant(L2_WEIGHT)

    mid = mid0 - mid1 - train_ph_dict['reward_1']
    # mid = mid * mid
    mid = tf.abs(mid)
    mid = tf.reduce_mean(mid)
    score_diff = mid

    mid = mid + l2r
    mid = mid + (tf.abs(tf.reduce_mean(var_dict['b5'])) * tf.constant(L2_WEIGHT))
    loss = mid

    mid = tf.train.GradientDescentOptimizer(0.5).minimize(mid, var_list=var_dict.values())
    train = mid
    return train, loss, score_diff
Example 12: fpn_map_rois_to_levels
def fpn_map_rois_to_levels(boxes):
    """
    Assign boxes to level 2~5.

    Args:
        boxes (nx4):

    Returns:
        [tf.Tensor]: 4 tensors for level 2-5. Each tensor is a vector of indices of boxes in its level.
        [tf.Tensor]: 4 tensors, the gathered boxes in each level.

    Be careful that the returned tensor could be empty.
    """
    sqrtarea = tf.sqrt(tf_area(boxes))
    level = tf.to_int32(tf.floor(
        4 + tf.log(sqrtarea * (1. / 224) + 1e-6) * (1.0 / np.log(2))))

    # RoI levels range from 2~5 (not 6)
    level_ids = [
        tf.where(level <= 2),
        tf.where(tf.equal(level, 3)),   # == is not supported
        tf.where(tf.equal(level, 4)),
        tf.where(level >= 5)]
    level_ids = [tf.reshape(x, [-1], name='roi_level{}_id'.format(i + 2))
                 for i, x in enumerate(level_ids)]
    num_in_levels = [tf.size(x, name='num_roi_level{}'.format(i + 2))
                     for i, x in enumerate(level_ids)]
    add_moving_summary(*num_in_levels)

    level_boxes = [tf.gather(boxes, ids) for ids in level_ids]
    return level_ids, level_boxes
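The level computation above is the FPN assignment heuristic level = floor(4 + log2(sqrt(area) / 224)) (the 1e-6 term is only a numerical guard). A small numpy check with hypothetical square boxes, where sqrt(area) is just the side length:

import numpy as np

for side in [56.0, 112.0, 224.0, 448.0]:
    level = np.floor(4 + np.log2(side / 224.0))
    print(int(side), int(level))  # 56 -> 2, 112 -> 3, 224 -> 4, 448 -> 5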
Example 13: f
def f(X):
    """
    prob: n probabilities
    box: nx4 boxes

    Returns: n boolean, the selection
    """
    prob, box = X
    output_shape = tf.shape(prob)
    # filter by score threshold
    ids = tf.reshape(tf.where(prob > cfg.TEST.RESULT_SCORE_THRESH), [-1])
    prob = tf.gather(prob, ids)
    box = tf.gather(box, ids)
    # NMS within each class
    selection = tf.image.non_max_suppression(
        box, prob, cfg.TEST.RESULTS_PER_IM, cfg.TEST.FRCNN_NMS_THRESH)
    selection = tf.to_int32(tf.gather(ids, selection))
    # sort available in TF>1.4.0
    # sorted_selection = tf.contrib.framework.sort(selection, direction='ASCENDING')
    sorted_selection = -tf.nn.top_k(-selection, k=tf.size(selection))[0]
    mask = tf.sparse_to_dense(
        sparse_indices=sorted_selection,
        output_shape=output_shape,
        sparse_values=True,
        default_value=False)
    return mask
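The -tf.nn.top_k(-x) trick above sorts a vector in ascending order on TF versions that lack tf.contrib.framework.sort, with tf.size supplying k so that every element is kept. A standalone sketch:

import tensorflow as tf

x = tf.constant([3, 1, 2])
ascending = -tf.nn.top_k(-x, k=tf.size(x))[0]  # top_k sorts descending; negating twice flips the order

with tf.Session() as sess:
    print(sess.run(ascending))  # [1 2 3]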
Example 14: hard_negative_mining
def hard_negative_mining():
    bboxes_per_batch = tf.unstack(bboxes)
    classification_loss_per_batch = tf.unstack(classification_loss)
    num_positives_per_batch = tf.unstack(tf.reduce_sum(positives, axis=-1))
    neg_class_loss_per_batch = tf.unstack(neg_class_loss_all)

    neg_class_losses = []
    total_negatives = []

    for bboxes_per_image, classification_loss_per_image, num_positives_per_image, neg_class_loss_per_image in \
            zip(bboxes_per_batch, classification_loss_per_batch, num_positives_per_batch, neg_class_loss_per_batch):
        min_negatives_keep = tf.maximum(self.neg_pos_ratio * num_positives_per_image, 3)
        num_negatives_keep = tf.minimum(min_negatives_keep,
                                        tf.count_nonzero(neg_class_loss_per_image, dtype=tf.float32))

        indices = tf.image.non_max_suppression(bboxes_per_image, classification_loss_per_image,
                                               tf.to_int32(num_negatives_keep), iou_threshold=0.99)
        num_negatives = tf.size(indices)
        total_negatives.append(num_negatives)

        expanded_indexes = tf.expand_dims(indices, axis=1)  # shape: (num_negatives, 1)
        negatives_keep = tf.scatter_nd(expanded_indexes, updates=tf.ones_like(indices, dtype=tf.int32),
                                       shape=tf.shape(classification_loss_per_image))  # shape: (num_priors,)
        negatives_keep = tf.to_float(tf.reshape(negatives_keep, [num_priors]))  # shape: (num_priors,)
        neg_class_losses.append(tf.reduce_sum(classification_loss_per_image * negatives_keep, axis=-1))  # shape: (1,)

    return tf.stack(neg_class_losses), tf.reduce_sum(tf.stack(total_negatives))
Example 15: content_loss
def content_loss(endpoints_dict, content_layers):
    content_loss = 0
    for layer in content_layers:
        generated_images, content_images = tf.split(endpoints_dict[layer], 2, 0)
        size = tf.size(generated_images)
        content_loss += tf.nn.l2_loss(generated_images - content_images) * 2 / tf.to_float(size)  # remains the same as in the paper
    return content_loss