本文整理汇总了Python中tensorflow.range函数的典型用法代码示例。如果您正苦于以下问题:Python range函数的具体用法?Python range怎么用?Python range使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了range函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: loop
def loop(step_, beams_, beam_value_, golden_value_, golden_inside_, step_valid_, g_id_, golden_record, beam_record):
    """One step of a `tf.while_loop` performing beam search with a golden path.

    Scores the golden (reference) tag sequence and every current beam at
    position `step_`, records both score tensors, advances the beams, and
    checks whether the golden prefix is still inside the beam.

    NOTE(review): relies on names from the enclosing scope (`x`,
    `golden_path`, `build`, `index_of_tensor`, `length`, `self`) and is not
    callable standalone. Uses TF<=0.12 APIs: `tf.concat(dim, values)`,
    `tf.unpack`, `tf.select`.
    """
    # Features of the current input position.
    cur_feat_x_ = tf.gather(x, step_)
    # Golden tag prefix up to (but excluding) the current step.
    cur_golden_path_ = tf.gather(golden_path, tf.range(step_))
    cur_golden_feat_ = self._add_tag_dynamic(cur_feat_x_, cur_golden_path_)
    # cur_golden_output_ = self._build_cnn(cur_golden_feat_)
    cur_golden_output_ = build(cur_golden_feat_)
    # Golden tag chosen at this step, kept as a 1-element vector for slicing.
    cur_golden_node_ = tf.gather(golden_path, tf.reshape(step_, [1]))
    # Accumulate the golden path score: pick the [0, golden_tag] entry of the
    # network output.
    golden_value_ = tf.add(golden_value_,
                           tf.slice(cur_golden_output_, tf.concat(0, [[0], cur_golden_node_]), [1, 1]))
    # Score every beam with the current position's features appended.
    cur_beam_ = tf.unpack(beams_, num=self.beam_size)
    cur_beam_feat_ = tf.concat(0, [self._add_tag_dynamic(cur_feat_x_, tf.reshape(e, [-1])) for e in cur_beam_])
    # cur_beam_output_ = self._build_cnn(cur_beam_feat_)
    cur_beam_output_ = build(cur_beam_feat_)
    # TensorArray writes must happen before the loop variables are rebound.
    golden_record = golden_record.write(step_, cur_golden_output_)
    beam_record = beam_record.write(step_, cur_beam_output_)
    beam_value_, beams_ = self._top_beams_new(cur_beam_output_, beam_value_, beams_)
    # Golden prefix including the current step, for beam-membership testing.
    new_golden_path_ = tf.gather(golden_path, tf.range(step_ + 1))
    # golden_beam_id_ = index_of_tensor(new_golden_path_, beams_)
    g_id_ = index_of_tensor(new_golden_path_, beams_)
    # Golden path is "inside" iff index_of_tensor found at least one match.
    golden_inside_ = tf.select(tf.less(tf.shape(g_id_)[0], 1),
                               tf.constant(False, tf.bool), tf.constant(True, tf.bool))
    # Continue while within the sentence and under the tracked-step cap.
    step_valid_ = tf.logical_and(tf.less(step_+1, length), tf.less(step_+1, self.max_step_tracked))
    return [step_ + 1, beams_, beam_value_, golden_value_, golden_inside_, step_valid_, g_id_, golden_record, beam_record]
示例2: default_exchange_proposed_fn_
def default_exchange_proposed_fn_(num_replica, seed=None):
    """Default function for `exchange_proposed_fn` of `kernel`.

    Proposes adjacent-pair replica exchanges: with probability `probs`
    (free name from the enclosing scope) propose swapping neighbouring
    replicas, pairing either (0,1),(2,3),... or (1,2),(3,4),... at random;
    otherwise propose no exchanges.

    Returns:
      exchange_proposed: int32 Tensor of proposed swap pairs, shape
        (num_replica // 2, 2); all-zero when no exchange is proposed.
      exchange_proposed_n: scalar number of valid pairs in the above.
    """
    num_replica = tf.to_int32(num_replica)
    seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
    random_uniform = tf.random_uniform([], seed=seed)
    # Bernoulli(probs) coin: do we propose any exchange at all this step?
    accept_proposed_exchange = random_uniform < probs
    seed = distributions_util.gen_new_seed(seed, 'default_exchange_proposed_fn')
    # Second coin: start pairing at replica 0 or shift pairing to replica 1.
    zero_start = tf.random_uniform([], seed=seed) > 0.5
    # NOTE(review): `num_replica` is a Tensor here, so `num_replica % 2 == 0`
    # is a graph-mode comparison; branching on it with a Python `if` only
    # works when the parity is statically known — confirm against callers.
    if num_replica % 2 == 0:
        exchange_proposed = tf.where(
            zero_start, tf.range(num_replica),
            # Shifted pairing (1,2),(3,4),...: scatter 1..num_replica-2 into a
            # length-num_replica vector (leftover slots default to 0).
            tf.sparse_to_dense(tf.range(num_replica - 2), (num_replica,),
                               tf.range(1, num_replica - 1)))
        exchange_proposed_n = tf.where(zero_start, num_replica // 2,
                                       num_replica // 2 - 1)
    else:
        exchange_proposed = tf.where(
            zero_start, tf.range(num_replica - 1), tf.range(1, num_replica))
        exchange_proposed_n = num_replica // 2
    # Group adjacent indices into proposed swap pairs.
    exchange_proposed = tf.reshape(exchange_proposed, (num_replica // 2, 2))
    # Zero out the proposal entirely when the first coin rejected exchanging.
    exchange_proposed = tf.where(accept_proposed_exchange, exchange_proposed,
                                 tf.zeros_like(exchange_proposed))
    exchange_proposed_n = tf.where(accept_proposed_exchange,
                                   exchange_proposed_n,
                                   tf.zeros_like(exchange_proposed_n))
    return exchange_proposed, exchange_proposed_n
示例3: unpool_layer2x2_batch
def unpool_layer2x2_batch(self, bottom, argmax):
    """Unpool `bottom` 2x2 using the `argmax` recorded by a matching max-pool.

    Scatters each pooled value back to the spatial location stored in
    `argmax`, producing an output twice the spatial size of `bottom` with
    zeros everywhere else. Uses the TF<=0.12 `tf.concat(dim, values)`
    signature.
    """
    bottom_shape = tf.shape(bottom)
    top_shape = [bottom_shape[0], bottom_shape[1] * 2, bottom_shape[2] * 2, bottom_shape[3]]
    batch_size, height, width, channels = top_shape
    # Recover per-element (h, w) output coordinates from the flat argmax.
    argmax = self.unravel_argmax(argmax, tf.to_int64([batch_size, height, width, channels]))
    # Channel index for every pooled element, laid out as (N, C, H/2, W/2, 1).
    chan_idx = tf.to_int64(tf.range(channels))
    chan_idx = tf.tile(chan_idx, [batch_size * (width // 2) * (height // 2)])
    chan_idx = tf.transpose(tf.reshape(chan_idx, [-1, channels]), perm=[1, 0])
    chan_idx = tf.reshape(chan_idx, [channels, batch_size, height // 2, width // 2, 1])
    chan_idx = tf.transpose(chan_idx, perm=[1, 0, 2, 3, 4])
    # Batch index for every pooled element, laid out as (N, C, H/2, W/2, 1).
    batch_idx = tf.to_int64(tf.range(batch_size))
    batch_idx = tf.tile(batch_idx, [channels * (width // 2) * (height // 2)])
    batch_idx = tf.transpose(tf.reshape(batch_idx, [-1, batch_size]), perm=[1, 0])
    batch_idx = tf.reshape(batch_idx, [batch_size, channels, height // 2, width // 2, 1])
    # Move the unraveled spatial coordinates to the same (N, ?, H/2, W/2, ...)
    # layout as the index tensors above.
    spatial_idx = tf.transpose(argmax, perm=[1, 4, 2, 3, 0])
    # Full (batch, h, w, channel) scatter index per value.
    full_idx = tf.concat(4, [batch_idx, spatial_idx, chan_idx])
    indices = tf.reshape(full_idx, [(height // 2) * (width // 2) * channels * batch_size, 4])
    # Values flattened in the same (N, C, H/2, W/2) order as the indices.
    values = tf.reshape(tf.transpose(bottom, perm=[0, 3, 1, 2]), [-1])
    return tf.scatter_nd(indices, values, tf.to_int64(top_shape))
示例4: translate
def translate(U, theta, out_height, out_width):
    """Place each image of `U` on a larger zero canvas, shifted by `theta`.

    Builds one (batch, row, col, channel) target index per input element,
    offsets the spatial coordinates by the clipped integer translation taken
    from `theta`, and scatters the flattened input into a
    (num_batch, out_height, out_width, num_ch) output. Uses TF<=0.12 APIs
    (`tf.pack`, `tf.split(dim, n, value)`, `tf.concat(dim, values)`) and the
    free helper `repeat` from the enclosing module.
    """
    num_batch = tf.shape(U)[0]
    height, width, num_ch = U.get_shape()[1:]
    height = height.value
    width = width.value
    num_ch = num_ch.value
    hwc = height*width*num_ch
    # One (batch, row, col, channel) index column per element of U.
    b_idx = tf.expand_dims(repeat(tf.range(num_batch), hwc), 1)
    row = repeat(tf.range(height), width)
    col = tf.tile(tf.range(width), tf.pack([height]))
    row = tf.tile(tf.expand_dims(repeat(row, num_ch), 1), tf.pack([num_batch, 1]))
    col = tf.tile(tf.expand_dims(repeat(col, num_ch), 1), tf.pack([num_batch, 1]))
    c_idx = tf.tile(tf.expand_dims(tf.range(num_ch), 1), tf.pack([num_batch*height*width, 1]))
    # Per-batch integer translation, clipped so the image stays inside the
    # canvas, then broadcast to every element of that batch.
    dx, dy = tf.split(1, 2, theta)
    dx = tf.cast(tf.clip_by_value(dx, 0, out_height-height), 'int32')
    dy = tf.cast(tf.clip_by_value(dy, 0, out_width-width), 'int32')
    dx = tf.reshape(tf.tile(dx, tf.pack([1, hwc])), [-1, 1])
    dy = tf.reshape(tf.tile(dy, tf.pack([1, hwc])), [-1, 1])
    # Scatter the flattened input values into their shifted positions.
    tind = tf.concat(1, [b_idx, row + dx, col + dy, c_idx])
    T = tf.sparse_to_dense(tind,
                           tf.pack([num_batch, out_height, out_width, num_ch]),
                           tf.reshape(U, [-1]))
    T.set_shape([None, out_height, out_width, num_ch])
    return T
示例5: inference_pooling_L2norm_choose_filter
def inference_pooling_L2norm_choose_filter(images, kheight=2, kwidth=5):
    """EEG pooling layer: channel-domain mapper, then spatial pooling.

    NOTE(review): `kheight`/`kwidth` are never used, and everything computed
    from `range_even`, `range_odd`, `even_rows`, and `odd_rows` below is dead
    code — each assignment is either overwritten or never read again, so the
    returned tensor is simply max_pool(sqrt(pool_s)). Given the "L2norm"
    name, the intent was presumably an L2-norm pooling such as
    pool_s = tf.sqrt(avg_pool(3.0 * pool_s**2)); confirm against the
    original project before relying on this layer.
    """
    # channel domain pooling mapper
    split_dim = 1 # 1 represents split on spatial domain
    input_image_list = split_eeg.split_eeg_signal_axes(images,
                                                       split_dim=split_dim)
    input_image_length = len(input_image_list)
    # the pooling mapper should choose half size of the image size
    pool_s, _ = concat_eeg.pool_eeg_signal_channel(input_image_list, input_image_length/2, 1)
    _print_tensor_size(pool_s)
    input_shape = pool_s.get_shape()
    # NOTE(review): dead from here...
    range_even = tf.range(0, input_shape[0], 2)
    range_odd = tf.range(1, input_shape[0], 2)
    even_rows = tf.nn.embedding_lookup(images, range_even)
    odd_rows = tf.nn.embedding_lookup(images, range_odd)
    even_rows = tf.mul(pool_s,pool_s)          # overwritten on the next line
    even_rows = tf.mul(3.0, pool_s)
    even_rows = tf.nn.avg_pool(even_rows, ksize=[1, 1, 3, 1],
                               strides=[1, 1, 3, 1], padding='VALID')
    # NOTE(review): ...to here — `even_rows` is never read below.
    pool_s = tf.sqrt(pool_s)
    pool_s = tf.nn.max_pool(pool_s, ksize=[1, 2, 1, 1],
                            strides=[1, 2, 1, 1], padding='VALID')
    _print_tensor_size(pool_s)
    return pool_s
示例6: accuracy_instance
def accuracy_instance(predictions, targets, n=[1, 2, 3, 4, 5, 10], nb_classes=5, nb_samples_per_class=10, batch_size=1):
    """Per-instance accuracy bookkeeping for episodic (few-shot) evaluation.

    NOTE(review): Python 2 only — `step_` uses tuple parameter unpacking,
    which is a syntax error under Python 3. The mutable default `n=[...]` is
    never used in the visible code, and the snippet appears truncated: the
    scan/fold that would apply `step_` (and the outer return) is not shown.
    """
    targets = tf.cast(targets, predictions.dtype)
    # accuracy[b, k]: running correct-count for the k-th occurrence of a class.
    accuracy = tf.constant(value=0, shape=(batch_size, nb_samples_per_class), dtype=tf.float32)
    # indices[b, c]: how many times class c has been seen so far in batch b.
    indices = tf.constant(value=0, shape=(batch_size, nb_classes+1), dtype=tf.float32)
    def step_((accuracy, indices), (p, t)):
        """with tf.variable_scope("Metric_step_var", reuse=True):
            accuracy = tf.get_variable(name="accuracy", shape=(batch_size, nb_samples_per_class),
                                       initializer=tf.constant_initializer(0), dtype=tf.float32)
            indices = tf.get_variable(name="indices", shape=(batch_size, nb_classes + 1),
                                      initializer=tf.constant_initializer(0), dtype=tf.float32)"""
        p = tf.cast(p, tf.int32)
        t = tf.cast(t, tf.int32)
        ##Accuracy Update
        batch_range = tf.cast(tf.range(0, batch_size), dtype=tf.int32)
        # Occurrence count of each true class so far -> which accuracy column
        # this prediction contributes to.
        gather = tf.cast(tf.gather_nd(indices,tf.stack([tf.range(0,p.get_shape().as_list()[0]), t], axis=1)), tf.int32)
        index = tf.cast(tf.stack([batch_range, gather], axis=1), dtype=tf.int64)
        # 1.0 where the prediction matches the target, else 0.0.
        val = tf.cast(tf.equal(p, t), tf.float32)
        # Sparse add: accuracy[b, occurrence] += correct.
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(accuracy.get_shape().as_list(), tf.int64))
        accuracy = accuracy + tf.sparse_tensor_to_dense(delta)
        ##Index Update
        # Sparse add: indices[b, t] += 1 (class t seen once more).
        index = tf.cast(tf.stack([batch_range, t], axis=1), dtype=tf.int64)
        val = tf.constant(1.0, shape=[batch_size])
        delta = tf.SparseTensor(indices=index, values=val, dense_shape=tf.cast(indices.get_shape().as_list(), dtype=tf.int64))
        indices = indices + tf.sparse_tensor_to_dense(delta)
        return [accuracy, indices]
示例7: _do_maximum_mean
def _do_maximum_mean(samples, envelope, high, name=None):
    """Common code between maximum_mean and minimum_mean."""
    with tf.name_scope(name, "do_maximum_mean", [samples, envelope, high]):
        dtype = dtype_util.common_dtype([samples, envelope, high], tf.float32)
        samples = tf.convert_to_tensor(samples, name="samples", dtype=dtype)
        envelope = tf.convert_to_tensor(envelope, name="envelope", dtype=dtype)
        high = tf.convert_to_tensor(high, name="high", dtype=dtype)
        # Rotate the batch dimension to the rightmost position, where
        # _batch_sort_vector wants it, then sort each batch ascending.
        ndims = tf.rank(samples)
        samples = tf.transpose(samples, tf.concat([tf.range(1, ndims), [0]], axis=0))
        samples = _batch_sort_vector(samples)
        # The maximum mean takes `envelope`-worth of probability mass away
        # from the smallest samples and moves it to the value `high`:
        #   - the smallest k samples (k/n < envelope) contribute nothing,
        #   - the boundary sample contributes its surviving fraction of 1/n,
        #   - all larger samples contribute their full 1/n,
        #   - plus envelope * high at the end.
        # `surviving_mass` implements that weighting, vectorized and batched.
        n_float = tf.cast(tf.shape(samples)[-1], dtype=dtype)
        mass_per_sample = 1. / n_float
        cum_mass = mass_per_sample * tf.range(1, n_float + 1, dtype=dtype)
        surviving_mass = tf.clip_by_value(
            cum_mass - envelope[..., tf.newaxis],
            clip_value_min=0.,
            clip_value_max=mass_per_sample)
        return tf.reduce_sum(samples * surviving_mass, axis=-1) + envelope * high
示例8: coord_addition
def coord_addition(votes, H, W):
    """Coordinate addition for matrix-capsule votes.

    Adds the normalized (row, column) cell centers to the first two pose
    components of every vote, leaving the other 14 components untouched.

    :param votes: (24, 4, 4, 32, 10, 16)
    :param H, W: spatial height and width, e.g. 4
    :return votes: (24, 4, 4, 32, 10, 16)
    """
    # Normalized center of each row, broadcastable as (1, H, 1, 1, 1).
    h_centers = tf.reshape(
        (tf.range(H, dtype=tf.float32) + 0.50) / H, [1, H, 1, 1, 1]
    )
    h_zeros = tf.constant(
        0.0, shape=[1, H, 1, 1, 1], dtype=tf.float32
    )
    # Height offset occupies pose component 0; the other 15 stay zero.
    offset_h = tf.stack(
        [h_centers] + [h_zeros] * 15, axis=-1
    )  # (1, H, 1, 1, 1, 16)
    # Normalized center of each column, broadcastable as (1, 1, W, 1, 1).
    w_centers = tf.reshape(
        (tf.range(W, dtype=tf.float32) + 0.50) / W, [1, 1, W, 1, 1]
    )
    w_zeros = tf.constant(
        0.0, shape=[1, 1, W, 1, 1], dtype=tf.float32
    )
    # Width offset occupies pose component 1.
    offset_w = tf.stack(
        [w_zeros, w_centers] + [w_zeros] * 14, axis=-1
    )  # (1, 1, W, 1, 1, 16)
    # Broadcast-add both offsets onto the votes.
    return votes + offset_h + offset_w
示例9: test_docstring_example
def test_docstring_example(self):
    """Verify the Halton docstring example: Monte Carlo integration of
    x_1 * x_2^2 * x_3^3 over the unit cube, with and without leaping."""
    num_results = 1000
    dim = 3
    with self.test_session():
        # First 1000 members of the Halton sequence in 3 dimensions.
        sample = tfp.mcmc.sample_halton_sequence(
            dim, num_results=num_results, randomized=False)
        powers = tf.range(1., limit=dim + 1)
        mc_estimate = tf.reduce_mean(
            tf.reduce_prod(sample ** powers, axis=-1))
        # Integral of prod_i x_i^i over [0,1]^dim is prod_i 1/(i+1).
        true_value = 1. / tf.reduce_prod(powers + 1.)
        # Produces a relative absolute error of 1.7%.
        self.assertAllClose(mc_estimate.eval(), true_value.eval(), rtol=0.02)
        # Skip the first 1000 samples via `sequence_indices` and recompute
        # the integral with the next thousand.
        sequence_indices = tf.range(start=1000, limit=1000 + num_results,
                                    dtype=tf.int32)
        sample_leaped = tfp.mcmc.sample_halton_sequence(
            dim, sequence_indices=sequence_indices, randomized=False)
        leaped_estimate = tf.reduce_mean(
            tf.reduce_prod(sample_leaped ** powers, axis=-1))
        self.assertAllClose(leaped_estimate.eval(), true_value.eval(), rtol=0.05)
示例10: setup
def setup(self, batch_size, num_concurrent):
    """Build the input pipeline: path queue -> processing -> batch dequeue.

    Validates the batch size, enqueues image paths (with indices, extension
    flags, and optionally face bounding boxes) into a FIFO queue, creates
    crop/flip processing variants, and wires a second queue plus a
    QueueRunner that produces batches of processed images.

    NOTE(review): Python 2 era code — `num_images / batch_size` would yield
    a float under Python 3; `self.num_batches` is presumably meant to be an
    integer. Relies on `cfg`, `np`, and several `self` attributes set
    elsewhere.
    """
    # Validate the batch size
    num_images = len(self.image_paths)
    batch_size = min(num_images, batch_size or self.data_spec.batch_size)
    if num_images % batch_size != 0:
        raise ValueError(
            'The total number of images ({}) must be divisible by the batch size ({}).'.format(
                num_images, batch_size))
    self.num_batches = num_images / batch_size
    # Create a queue that will contain image paths (and their indices and extension indicator)
    if self.face_bboxes is None:
        self.path_bbox_queue = tf.FIFOQueue(capacity=num_images,
                                            dtypes=[tf.int32, tf.bool, tf.string],
                                            name='path_queue')
        indices = tf.range(num_images)
        self.enqueue_paths_op = self.path_bbox_queue.enqueue_many([indices, self.extension_mask,
                                                                   self.image_paths])
    else:
        # Same queue with an extra int32 slot for the face bounding box.
        self.path_bbox_queue = tf.FIFOQueue(capacity=num_images,
                                            dtypes=[tf.int32, tf.bool, tf.string, tf.int32],
                                            name='path_queue')
        indices = tf.range(num_images)
        self.enqueue_paths_op = self.path_bbox_queue.enqueue_many([indices, self.extension_mask,
                                                                   self.image_paths,self.face_bboxes])
    # Close the path queue (no more additions)
    self.close_path_queue_op = self.path_bbox_queue.close()
    # Create an operation that dequeues a single path and returns a processed image
    # Each entry is a (crop_id, flip?) variant; variant 0 is the plain image.
    crop_flip = [[0,False]]
    if cfg.CROP:
        # Four additional crops (ids 1..4).
        for i in range(1,5):
            crop_flip.append([i,False])
    if cfg.FLIP:
        # Mirrored copy of every variant collected so far.
        for i in range(len(crop_flip)):
            crop_flip.append((crop_flip[i][0],True))
    (processed_idx_list,processed_img_list) = self.process(crop_flip)
    # Create a queue that will contain the processed images (and their indices)
    image_shape = (self.data_spec.crop_size, self.data_spec.crop_size, self.data_spec.channels)
    processed_queue = tf.FIFOQueue(capacity=int(np.ceil(len(crop_flip)*num_images / float(num_concurrent))),
                                   dtypes=[tf.int32, tf.float32],
                                   shapes=[(), image_shape],
                                   name='processed_queue')
    # Enqueue the processed image and path
    enqueue_processed_op = processed_queue.enqueue_many([processed_idx_list,processed_img_list])
    # Create a dequeue op that fetches a batch of processed images off the queue
    [self.ind_deq,self.img_deq] = processed_queue.dequeue_many(batch_size)
    self.dequeue_op = [self.ind_deq,self.img_deq]
    # Create a queue runner to perform the processing operations in parallel
    num_concurrent = min(num_concurrent, num_images)
    self.queue_runner = tf.train.QueueRunner(processed_queue,
                                             [enqueue_processed_op] * num_concurrent)
    self.num_imgs = len(crop_flip)*num_images
    self.num_feats_per_image = len(crop_flip)
示例11: get_position_encoding
def get_position_encoding(
    length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
    """Return positional encoding.

    Calculates the position encoding as a mix of sine and cosine functions
    with geometrically increasing wavelengths, as defined in "Attention is
    All You Need", section 3.5.

    Args:
      length: Sequence length.
      hidden_size: Size of the position encoding (channels).
      min_timescale: Minimum scale that will be applied at each position.
      max_timescale: Maximum scale that will be applied at each position.

    Returns:
      Tensor with shape [length, hidden_size].
    """
    # One geometrically spaced inverse timescale per sin/cos channel pair.
    num_timescales = hidden_size // 2
    log_increment = (
        math.log(float(max_timescale) / float(min_timescale)) /
        (tf.to_float(num_timescales) - 1))
    inv_timescales = min_timescale * tf.exp(
        tf.to_float(tf.range(num_timescales)) * -log_increment)
    # Outer product: (length, 1) positions times (1, num_timescales)
    # frequencies gives the phase of every channel at every position.
    positions = tf.to_float(tf.range(length))
    scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0)
    # First half of the channels is sine, second half cosine.
    return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)
示例12: get_idx_map
def get_idx_map(shape):
    """Get index map for a image.

    Builds a coordinate grid: for every pixel, the last dimension holds its
    (y, x) position as floats.

    Args:
      shape: [B, T, H, W] or [B, H, W] (a rank-1 shape tensor; H and W are
        assumed to be the last two entries).

    Returns:
      idx: [B, T, H, W, 2], or [B, H, W, 2]

    NOTE(review): uses the TF<=0.12 `tf.concat(dim, values)` signature, and
    the final `tf.concat(ndims[0], ...)` passes a Tensor as the concat axis
    — verify this works on the targeted TF version.
    """
    s = shape
    # Dynamic rank and the (rank-relative) positions of the H and W axes.
    ndims = tf.shape(s)
    wdim = ndims - 1
    hdim = ndims - 2
    # Output shape = input shape with a trailing coordinate axis of size 1
    # (later concatenated to size 2).
    idx_shape = tf.concat(0, [s, tf.constant([1])])
    ones_h = tf.ones(hdim - 1, dtype='int32')
    ones_w = tf.ones(wdim - 1, dtype='int32')
    # Broadcast shapes that place the H (resp. W) range along its own axis.
    h_shape = tf.concat(0, [ones_h, tf.constant([-1]), tf.constant([1, 1])])
    w_shape = tf.concat(0, [ones_w, tf.constant([-1]), tf.constant([1])])
    idx_y = tf.zeros(idx_shape, dtype='float')
    idx_x = tf.zeros(idx_shape, dtype='float')
    h = tf.slice(s, ndims - 2, [1])
    w = tf.slice(s, ndims - 1, [1])
    # Broadcast row indices down the H axis and column indices down the W axis.
    idx_y += tf.reshape(tf.to_float(tf.range(h[0])), h_shape)
    idx_x += tf.reshape(tf.to_float(tf.range(w[0])), w_shape)
    # Join (y, x) along the trailing coordinate axis.
    idx = tf.concat(ndims[0], [idx_y, idx_x])
    return idx
示例13: fold_batches
def fold_batches(acc, x):
    """Fold step: append (batch, start, end) window rows for one sequence.

    `x` is a (batch_id, seq_len) pair. Emits one row per position, with the
    window end clamped to the sequence length, stacked below the rows
    accumulated so far in `acc`. `window` comes from the enclosing scope.
    """
    batch_id, seq_len = x[0], x[1]
    starts = tf.range(seq_len)
    # End of each window is start + window, clipped to seq_len.
    ends = tf.minimum(tf.range(window, seq_len + window), seq_len)
    batch_col = tf.tile([batch_id], [seq_len])
    rows = tf.transpose(tf.stack([batch_col, starts, ends]))
    return tf.concat([acc, rows], axis=0)
示例14: loop
def loop(q_, mask, mass_, found_):
    """One iteration of a constrained-renormalization while-loop body.

    Redistributes the remaining probability mass over the still-active
    entries of `q_` (where `mask` is 1), freezes entries whose renormalized
    probability would drop below the bound `u` (a free name from the
    enclosing scope), and reports via `found_` whether any entry was frozen
    this pass.
    """
    # Split q_ into inactive (mask==0) and active (mask==1) entries.
    q_list = tf.dynamic_partition(q_, mask, 2)
    condition_indices = tf.dynamic_partition(tf.range(tf.shape(q_)[0]), mask, 2)  # 0 element it False,
    # 1 element if true
    # Renormalize the active entries to carry the unassigned mass (1 - mass_).
    p = q_list[1] * (1.0 - mass_) / tf.reduce_sum(q_list[1])
    p_new = tf.dynamic_stitch(condition_indices, [q_list[0], p])
    # condition verification and mask modification
    less_mask = tf.cast(tf.less(u, p_new), tf.int32)  # 0 when u is bigger than p, 1 when u is less than p
    condition_indices = tf.dynamic_partition(tf.range(tf.shape(p_new)[0]), less_mask,
                                             2)  # 0 when u is bigger than p, 1 when u is less than p
    split_p_new = tf.dynamic_partition(p_new, less_mask, 2)
    split_u = tf.dynamic_partition(u, less_mask, 2)
    # Entries exceeding the bound are clamped to u; the rest keep p_new.
    alpha = tf.dynamic_stitch(condition_indices, [split_p_new[0], split_u[1]])
    # Mass consumed by the entries frozen at u this pass.
    mass_ += tf.reduce_sum(split_u[1])
    # Frozen entries leave the active set.
    mask = mask * (tf.ones_like(less_mask) - less_mask)
    # found_ is True iff at least one entry was clamped (loop must continue).
    found_ = tf.cond(tf.equal(tf.reduce_sum(less_mask), 0),
                     lambda: False,
                     lambda: True)
    alpha = tf.reshape(alpha, q_.shape)
    return alpha, mask, mass_, found_
示例15: _potential_scale_reduction_single_state
def _potential_scale_reduction_single_state(state, independent_chain_ndims):
    """potential_scale_reduction for one single state `Tensor`."""
    with tf.name_scope(
        'potential_scale_reduction_single_state',
        values=[state, independent_chain_ndims]):
        state = tf.convert_to_tensor(state, name='state')
        # Axis 0 indexes correlated samples within each Markov chain; the
        # next `independent_chain_ndims` axes index independent chains.
        sample_ndims = 1
        sample_axis = tf.range(0, sample_ndims)
        chain_axis = tf.range(sample_ndims, sample_ndims + independent_chain_ndims)
        sample_and_chain_axis = tf.range(0, sample_ndims + independent_chain_ndims)
        n = _axis_size(state, sample_axis)   # samples per chain
        m = _axis_size(state, chain_axis)    # number of chains
        # In the language of Brooks and Gelman (1998):
        #   B / n — between-chain variance, i.e. variance of the chain means;
        #   W     — within-chain variance, i.e. mean of the chain variances.
        between_div_n = _reduce_variance(
            tf.reduce_mean(state, sample_axis, keepdims=True),
            sample_and_chain_axis,
            biased=False)
        within = tf.reduce_mean(
            _reduce_variance(state, sample_axis, keepdims=True, biased=True),
            sample_and_chain_axis)
        # sigma^2_+ = W + B/n estimates the true variance ("law of total
        # variance") and would be unbiased were each chain drawn from the
        # target distribution.
        sigma_2_plus = within + between_div_n
        return ((m + 1.) / m) * sigma_2_plus / within - (n - 1.) / (m * n)