This article collects typical usage examples of the Python function tensorflow.random_shuffle. If you have been wondering what exactly random_shuffle does, how to use it, or where to find real-world examples, the curated code samples below may help.
The following presents 15 code examples of the random_shuffle function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
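Before diving into the examples, here is a minimal sketch of the function's core behavior (assuming TensorFlow 1.x graph mode): tf.random_shuffle permutes a tensor along its first dimension only, so rows are reordered but each row's contents stay together.

import tensorflow as tf

x = tf.constant([[1, 2], [3, 4], [5, 6]])
shuffled = tf.random_shuffle(x, seed=42)  # reorders rows; row contents stay intact

with tf.Session() as sess:
    print(sess.run(shuffled))  # e.g. [[5 6], [1 2], [3 4]]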
Example 1: make_minibatch
def make_minibatch(self, valid_anchors):
    with tf.variable_scope('rpn_minibatch'):
        # in labels (shape [N, ]): 1 is positive, 0 is negative, -1 is ignored
        labels, anchor_matched_gtboxes, object_mask = \
            self.rpn_find_positive_negative_samples(valid_anchors)  # [num_of_valid_anchors, ]

        positive_indices = tf.reshape(tf.where(tf.equal(labels, 1.0)), [-1])  # using labels here is equivalent to using object_mask
        num_of_positives = tf.minimum(tf.shape(positive_indices)[0],
                                      tf.cast(self.rpn_mini_batch_size * self.rpn_positives_ratio,
                                              tf.int32))
        # num of positives <= minibatch_size * 0.5
        positive_indices = tf.random_shuffle(positive_indices)
        positive_indices = tf.slice(positive_indices, begin=[0], size=[num_of_positives])
        # positive_anchors = tf.gather(self.anchors, positive_indices)

        negative_indices = tf.reshape(tf.where(tf.equal(labels, 0.0)), [-1])
        num_of_negatives = tf.minimum(self.rpn_mini_batch_size - num_of_positives,
                                      tf.shape(negative_indices)[0])
        negative_indices = tf.random_shuffle(negative_indices)
        negative_indices = tf.slice(negative_indices, begin=[0], size=[num_of_negatives])
        # negative_anchors = tf.gather(self.anchors, negative_indices)

        minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0)
        minibatch_indices = tf.random_shuffle(minibatch_indices)

        minibatch_anchor_matched_gtboxes = tf.gather(anchor_matched_gtboxes, minibatch_indices)
        object_mask = tf.gather(object_mask, minibatch_indices)
        labels = tf.cast(tf.gather(labels, minibatch_indices), tf.int32)
        labels_one_hot = tf.one_hot(labels, depth=2)
        return minibatch_indices, minibatch_anchor_matched_gtboxes, object_mask, labels_one_hot
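Both branches of this RPN minibatch routine, and Example 2 below, rely on the same shuffle-then-slice idiom for sampling without replacement. A stripped-down sketch of that idiom (sample_indices is a hypothetical name; TensorFlow 1.x assumed):

import tensorflow as tf

def sample_indices(indices, k):
    """Randomly keep at most k entries of a 1-D index tensor, without replacement."""
    k = tf.minimum(tf.shape(indices)[0], k)         # never ask for more than we have
    shuffled = tf.random_shuffle(indices)           # uniform random permutation
    return tf.slice(shuffled, begin=[0], size=[k])  # first k of the permutation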
Example 2: fast_rcnn_minibatch
def fast_rcnn_minibatch(self, reference_boxes):
    with tf.variable_scope('fast_rcnn_minibatch'):
        reference_boxes_mattached_gtboxes, object_mask, label = \
            self.fast_rcnn_find_positive_negative_samples(reference_boxes)

        positive_indices = tf.reshape(tf.where(tf.not_equal(object_mask, 0.)), [-1])
        num_of_positives = tf.minimum(tf.shape(positive_indices)[0],
                                      tf.cast(self.fast_rcnn_minibatch_size * self.fast_rcnn_positives_ratio,
                                              tf.int32))
        positive_indices = tf.random_shuffle(positive_indices)
        positive_indices = tf.slice(positive_indices, begin=[0], size=[num_of_positives])

        negative_indices = tf.reshape(tf.where(tf.equal(object_mask, 0.)), [-1])
        num_of_negatives = tf.minimum(tf.shape(negative_indices)[0],
                                      self.fast_rcnn_minibatch_size - num_of_positives)
        negative_indices = tf.random_shuffle(negative_indices)
        negative_indices = tf.slice(negative_indices, begin=[0], size=[num_of_negatives])

        minibatch_indices = tf.concat([positive_indices, negative_indices], axis=0)
        minibatch_indices = tf.random_shuffle(minibatch_indices)

        minibatch_reference_boxes_mattached_gtboxes = tf.gather(reference_boxes_mattached_gtboxes,
                                                                minibatch_indices)
        object_mask = tf.gather(object_mask, minibatch_indices)
        label = tf.gather(label, minibatch_indices)
        label_one_hot = tf.one_hot(label, self.num_classes + 1)

        return minibatch_indices, minibatch_reference_boxes_mattached_gtboxes, object_mask, label_one_hot
Example 3: __init__
def __init__(self, tensors: List[tf.Tensor], cluster_indexes: tf.Tensor, n_splits, seed,
             train_sampling=1.0, test_sampling=1.0):
    size = tensors[0].shape[0].value
    self.seed = seed
    clustered_index = self.cluster_pages(cluster_indexes)
    index_len = tf.shape(clustered_index)[0]
    assert_op = tf.assert_equal(index_len, size,
                                message='n_pages is not equal to size of clustered index')
    with tf.control_dependencies([assert_op]):
        split_nitems = int(round(size / n_splits))
        split_size = [split_nitems] * n_splits
        split_size[-1] = size - (n_splits - 1) * split_nitems
        splits = tf.split(clustered_index, split_size)
        complements = [tf.random_shuffle(tf.concat(splits[:i] + splits[i + 1:], axis=0), seed)
                       for i in range(n_splits)]
        splits = [tf.random_shuffle(split, seed) for split in splits]

    def mk_name(prefix, tensor):
        return prefix + '_' + tensor.name[:-2]

    def prepare_split(i):
        test_size = split_size[i]
        train_size = size - test_size
        test_sampled_size = int(round(test_size * test_sampling))
        train_sampled_size = int(round(train_size * train_sampling))
        test_idx = splits[i][:test_sampled_size]
        train_idx = complements[i][:train_sampled_size]
        test_set = [tf.gather(tensor, test_idx, name=mk_name('test', tensor)) for tensor in tensors]
        train_set = [tf.gather(tensor, train_idx, name=mk_name('train', tensor)) for tensor in tensors]
        return Split(test_set, train_set, test_sampled_size, train_sampled_size)

    self.splits = [prepare_split(i) for i in range(n_splits)]
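The fold construction above reduces to tf.split plus a shuffled complement per fold. A stripped-down sketch with made-up sizes (twelve indices, three folds; TensorFlow 1.x assumed):

import tensorflow as tf

index = tf.range(12)
folds = tf.split(index, 3)  # three folds of four indices each
# train/test pair for fold 0: the other folds form the training complement
test_0 = tf.random_shuffle(folds[0], seed=5)
train_0 = tf.random_shuffle(tf.concat(folds[1:], axis=0), seed=5)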
Example 4: cifar_filename_queue
def cifar_filename_queue(filename_list):
    # convert the list to a tensor
    string_tensor = tf.convert_to_tensor(filename_list, dtype=tf.string)
    # randomize the tensor (random_shuffle returns a new tensor, so keep the result)
    string_tensor = tf.random_shuffle(string_tensor)
    # create the queue
    fq = tf.FIFOQueue(capacity=10, dtypes=tf.string)
    # create our enqueue_op for this q
    fq_enqueue_op = fq.enqueue_many([string_tensor])
    # create a QueueRunner and add to queue runner list
    # we only need one thread for this simple queue
    tf.train.add_queue_runner(tf.train.QueueRunner(fq, [fq_enqueue_op] * 1))
    return fq
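For this particular case, TF 1.x also ships a one-liner that shuffles internally and registers its own QueueRunner, so the manual enqueue bookkeeping disappears. A sketch with placeholder filenames:

import tensorflow as tf

filename_list = ['batch_1.bin', 'batch_2.bin']  # placeholder file names
fq = tf.train.string_input_producer(filename_list, shuffle=True, capacity=10)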
Example 5: subsample_indicator
def subsample_indicator(indicator, num_samples):
    """Subsample indicator vector.

    Given a boolean indicator vector with M elements set to `True`, the function
    assigns all but `num_samples` of these previously `True` elements to
    `False`. If `num_samples` is greater than M, the original indicator vector
    is returned.

    Args:
      indicator: a 1-dimensional boolean tensor indicating which elements
        are allowed to be sampled and which are not.
      num_samples: int32 scalar tensor

    Returns:
      a boolean tensor with the same shape as input (indicator) tensor
    """
    indices = tf.where(indicator)
    indices = tf.random_shuffle(indices)
    indices = tf.reshape(indices, [-1])

    num_samples = tf.minimum(tf.size(indices), num_samples)
    selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))

    selected_indicator = ops.indices_to_dense_vector(selected_indices,
                                                     tf.shape(indicator)[0])

    return tf.equal(selected_indicator, 1)
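A usage sketch (the inputs are made up, and the function relies on the ops helper referenced above):

import tensorflow as tf

indicator = tf.constant([True, False, True, True, False])
sampled = subsample_indicator(indicator, tf.constant(2))
# `sampled` has the same shape as `indicator`, with exactly
# min(2, number of True entries) positions left True.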
Example 6: __init__
def __init__(self, config):
    paths, meta = Input._collect(config.path)
    self.dimension_count = meta['dimension_count']
    self.sample_count = meta['sample_count']
    self.batch_size = config.get('batch_size', 1)
    if self.sample_count % self.batch_size > 0:
        raise Exception(
            ('expected the number of samples ({}) to be ' +
             'divisible by the batch size ({})').format(self.sample_count,
                                                        self.batch_size))
    with tf.variable_scope('state'):
        self.state = State()
    with tf.variable_scope('source'):
        paths = tf.Variable(paths, name='paths', dtype=tf.string,
                            trainable=False)
        queue = tf.FIFOQueue(meta['path_count'], [tf.string])
        enqueue = queue.enqueue_many([tf.random_shuffle(paths)])
        tf.train.add_queue_runner(tf.train.QueueRunner(queue, [enqueue]))
        _, record = tf.TFRecordReader().read(queue)
    with tf.variable_scope('x'):
        features = tf.parse_single_example(record, {
            'data': tf.VarLenFeature(tf.float32),
        })
        data = tf.sparse_tensor_to_dense(features['data'])
        if self.batch_size == 1:
            self.x = tf.reshape(data, [1, -1, self.dimension_count])
        else:
            x = tf.reshape(data, [-1, self.dimension_count])
            _, outputs = tf.contrib.training.bucket_by_sequence_length(
                tf.shape(x)[0], [x], self.batch_size, config.buckets,
                dynamic_pad=True)
            self.x = outputs[0]
    with tf.variable_scope('y'):
        self.y = tf.pad(self.x[:, 1:, :], [[0, 0], [0, 1], [0, 0]])
Example 7: generate_one
def generate_one(d):
    seed = stream()
    fn = lambda _: tf.random_shuffle(tf.range(d), seed=seed)
    return tf.map_fn(
        fn,
        sample_range,
        parallel_iterations=1 if seed is not None else 10)
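What generate_one builds, written out directly for concrete sizes (a sketch producing four independent permutations of range(3); TensorFlow 1.x assumed):

import tensorflow as tf

sample_range = tf.range(4)  # one map_fn step per requested permutation
perms = tf.map_fn(lambda _: tf.random_shuffle(tf.range(3)), sample_range)
# perms has shape [4, 3]; each row is an independent random permutation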
Example 8: get_svtcn_indices
def get_svtcn_indices(seq_len, batch_size, num_views):
    """Gets a random window of contiguous time indices from a sequence.

    Args:
      seq_len: Int, number of timesteps in the image sequence.
      batch_size: Int, size of the batch to construct.
      num_views: Int, the number of simultaneous viewpoints at each
        timestep in the dataset.

    Returns:
      time_indices: 1-D Int `Tensor` with size [batch_size], holding the
        timestep for each batch image.
      view_indices: 1-D Int `Tensor` with size [batch_size], holding the
        view for each batch image. This is consistent across the batch.
    """
    # Get anchor, positive time indices.
    def f1():
        # Choose a random contiguous range from within the sequence.
        range_min = tf.random_shuffle(tf.range(seq_len - batch_size))[0]
        range_max = range_min + batch_size
        return tf.range(range_min, range_max)

    def f2():
        # Consider the full sequence.
        return tf.range(seq_len)

    time_indices = tf.cond(tf.greater(seq_len, batch_size), f1, f2)

    # Get opposing anchor, positive view indices.
    random_view = tf.random_shuffle(tf.range(num_views))[0]
    view_indices = tf.tile([random_view], (batch_size,))
    return time_indices, view_indices
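The shuffle-a-range-and-take-element-zero trick used twice above draws one integer uniformly from [0, n). A sketch of the two equivalent ways to write that draw (TensorFlow 1.x assumed); the shuffle variant materializes all n values, so the direct draw is cheaper for large n:

import tensorflow as tf

n = 10
via_shuffle = tf.random_shuffle(tf.range(n))[0]                # as in the example
via_uniform = tf.random_uniform([], maxval=n, dtype=tf.int32)  # direct draw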
Example 9: build
def build(self, input_shape):
    input_dim = input_shape[1]

    # Per tree
    N_DECISION = (2 ** (self.n_depth)) - 1  # Number of decision nodes
    N_LEAF = 2 ** (self.n_depth + 1)  # Number of leaf nodes

    if self.randomize_training:
        # Construct a mask that lets N trees get trained per minibatch
        train_mask = np.zeros(self.n_trees, dtype=np.float32)
        for i in xrange(self.randomize_training):
            train_mask[i] = 1
        self.random_mask = tf.random_shuffle(tf.constant(train_mask))

    self.w_d_ensemble = []
    self.w_l_ensemble = []
    self.trainable_weights = []
    for i in xrange(self.n_trees):
        decision_weights = self.d_init((input_dim, N_DECISION),
                                       name=self.name + "_tree" + str(i) + "_dW")
        leaf_distributions = self.l_init((N_LEAF, self.output_classes),
                                         name=self.name + "_tree" + str(i) + "_lW")

        self.trainable_weights.append(decision_weights)
        self.trainable_weights.append(leaf_distributions)

        if self.randomize_training:
            do_gradient = self.random_mask[i]
            no_gradient = 1 - do_gradient
            # This should always allow inference, but block gradient flow when do_gradient = 0
            decision_weights = do_gradient * decision_weights + no_gradient * tf.stop_gradient(decision_weights)
            leaf_distributions = do_gradient * leaf_distributions + no_gradient * tf.stop_gradient(leaf_distributions)

        self.w_d_ensemble.append(decision_weights)
        self.w_l_ensemble.append(leaf_distributions)
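The stop_gradient blend in this layer is worth isolating: with a zero mask the forward value is unchanged, but no gradient reaches the weight. A minimal sketch (TensorFlow 1.x assumed):

import tensorflow as tf

w = tf.Variable(2.0)
mask = tf.constant(0.0)  # 1.0 lets w train this step, 0.0 freezes it
w_eff = mask * w + (1.0 - mask) * tf.stop_gradient(w)  # forward value is always w
loss = tf.square(w_eff - 1.0)
grad = tf.gradients(loss, w)[0]  # evaluates to 0.0 when mask is 0.0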
Example 10: get_random_scale
def get_random_scale(min_scale_factor, max_scale_factor, step_size):
    """Gets a random scale value.

    Args:
      min_scale_factor: Minimum scale value.
      max_scale_factor: Maximum scale value.
      step_size: The step size from minimum to maximum value.

    Returns:
      A random scale value selected between minimum and maximum value.

    Raises:
      ValueError: min_scale_factor has unexpected value.
    """
    if min_scale_factor < 0 or min_scale_factor > max_scale_factor:
        raise ValueError('Unexpected value of min_scale_factor.')

    if min_scale_factor == max_scale_factor:
        return tf.cast(min_scale_factor, tf.float32)

    # When step_size = 0, we sample the value uniformly from [min, max).
    if step_size == 0:
        return tf.random_uniform([1],
                                 minval=min_scale_factor,
                                 maxval=max_scale_factor)

    # When step_size != 0, we randomly select one discrete value from [min, max].
    num_steps = int((max_scale_factor - min_scale_factor) / step_size + 1)
    scale_factors = tf.lin_space(min_scale_factor, max_scale_factor, num_steps)
    shuffled_scale_factors = tf.random_shuffle(scale_factors)
    return shuffled_scale_factors[0]
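A usage sketch with made-up bounds: with minimum 0.5, maximum 2.0, and step 0.25, one of the seven discrete values 0.5, 0.75, ..., 2.0 is drawn uniformly.

scale = get_random_scale(0.5, 2.0, step_size=0.25)  # scalar tensor, one of 7 values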
Example 11: MiniminibatchLayer
def MiniminibatchLayer(name, n_in, dim_b, dim_c, group_size, inputs):
    inputs = tf.random_shuffle(inputs)
    inputs = tf.reshape(inputs, [-1, group_size, n_in])
    def f(a, x):
        return MinibatchLayer(name, n_in, dim_b, dim_c, x)
    outputs = tf.scan(f, inputs)
    return tf.reshape(outputs, [-1, n_in + dim_b])
Example 12: _add_gtboxes_as_first_stage_proposals
def _add_gtboxes_as_first_stage_proposals(self, first_stage_proposals, first_stage_scores, gtboxes):
    # 1. jitter gtboxes
    ws = gtboxes[:, 2]
    hs = gtboxes[:, 3]
    thetas = gtboxes[:, 4]

    hs_offset = (tf.random_normal(shape=tf.shape(hs)) - 0.5) * 0.1 * hs
    ws_offset = (tf.random_normal(shape=tf.shape(ws)) - 0.5) * 0.1 * ws
    thetas_offset = (tf.random_normal(shape=tf.shape(thetas)) - 0.5) * 0.1 * thetas

    hs = hs + hs_offset
    ws = ws + ws_offset
    thetas = thetas + thetas_offset

    new_boxes = tf.transpose(tf.stack([gtboxes[:, 0], gtboxes[:, 1], ws, hs, thetas], axis=0))

    # 2. get needed added gtboxes
    num_needed_add = tf.minimum(tf.cast(cfgs.FAST_RCNN_MINIBATCH_SIZE * cfgs.FAST_RCNN_POSITIVE_RATE * 0.5,
                                        tf.int32),
                                tf.shape(gtboxes)[0])
    added_boxes_indices = tf.random_shuffle(tf.range(start=0, limit=tf.shape(new_boxes)[0]))
    added_boxes_indices = tf.slice(added_boxes_indices, begin=[0], size=[num_needed_add])
    added_boxes = tf.gather(new_boxes, added_boxes_indices)

    # 3. add them
    all_boxes = tf.concat([first_stage_proposals, added_boxes], axis=0)
    all_scores = tf.concat([first_stage_scores, tf.ones(shape=[tf.shape(added_boxes)[0]]) * 0.95], axis=0)
    return all_boxes, all_scores
Example 13: _build_graph
def _build_graph(self):
    """Construct tensorflow nodes for round of clustering"""
    # N.B. without tf.Variable, makes awesome glitchy clustered images
    self.centroids_in = tf.Variable(tf.slice(tf.random_shuffle(self.arr),
                                             [0, 0], [self.k, -1]),
                                    name="centroids_in")
    # tiled should be shape(self.n_pixels, self.k, size_data = 2 + self.channels)
    tiled_pix = tf.tile(tf.expand_dims(self.arr, 1),
                        multiples=[1, self.k, 1], name="tiled_pix")

    # no need to take square root b/c positive reals and sqrt are isomorphic
    def radical_euclidean_dist(x, y):
        """Takes in 2 tensors and returns euclidean distance radical, i.e. dist**2"""
        with tf.name_scope("radical_euclidean"):
            return tf.square(tf.subtract(x, y))  # tf.sub was renamed to tf.subtract in TF 1.0

    # should be shape(self.n_pixels, self.k)
    distances = tf.reduce_sum(radical_euclidean_dist(tiled_pix, self.centroids_in),
                              reduction_indices=2, name="distances")
    # should be shape(self.n_pixels)
    nearest = tf.to_int32(tf.argmin(distances, 1), name="nearest")
    # should be list of len self.k with tensors of shape(size_cluster, size_data)
    self.clusters = tf.dynamic_partition(self.arr, nearest, self.k)
    # should be shape(self.k, size_data)
    self.centroids = tf.stack([tf.reduce_mean(cluster, 0) for cluster in self.clusters],
                              name="centroids_out")  # tf.pack was renamed to tf.stack in TF 1.0
    self.update_roids = tf.assign(self.centroids_in, self.centroids)
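The centroid initialization line is itself a reusable pattern: shuffle the data and slice off the first k rows to pick k distinct random points. In isolation (points and k are made up; TensorFlow 1.x assumed):

import tensorflow as tf

points = tf.random_uniform([100, 2])  # stand-in for self.arr
k = 5
init_centroids = tf.slice(tf.random_shuffle(points), [0, 0], [k, -1])  # k random rows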
Example 14: scheduled_sample_count
def scheduled_sample_count(ground_truth_x,
                           generated_x,
                           batch_size,
                           scheduled_sample_var):
    """Sample batch with specified mix of groundtruth and generated data points.

    Args:
      ground_truth_x: tensor of ground-truth data points.
      generated_x: tensor of generated data points.
      batch_size: batch size
      scheduled_sample_var: number of ground-truth examples to include in batch.

    Returns:
      New batch with num_ground_truth sampled from ground_truth_x and the rest
      from generated_x.
    """
    num_ground_truth = scheduled_sample_var
    idx = tf.random_shuffle(tf.range(batch_size))
    ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))
    generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size))

    ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)
    generated_examps = tf.gather(generated_x, generated_idx)

    output = tf.dynamic_stitch([ground_truth_idx, generated_idx],
                               [ground_truth_examps, generated_examps])
    # if batch size is known set it.
    if isinstance(batch_size, int):
        output.set_shape([batch_size] + common_layers.shape_list(output)[1:])
    return output
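A usage sketch with made-up shapes (note the function above also depends on tensor2tensor's common_layers for the final set_shape): mix three ground-truth rows into a batch of eight.

import tensorflow as tf

gt = tf.ones([8, 4])    # stand-in ground-truth batch
gen = tf.zeros([8, 4])  # stand-in generated batch
mixed = scheduled_sample_count(gt, gen, batch_size=8, scheduled_sample_var=3)
# `mixed` holds 3 rows from `gt` and 5 from `gen`, interleaved at random positions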
Example 15: PreDiscriminator
def PreDiscriminator(inputs):
    outputs = []
    for n_rows in [784]:
        output = tf.reshape(inputs, [-1, n_rows, 1])
        # use integer division so tf.range receives an int under Python 3 as well
        output = tf.gather(output, tf.random_shuffle(tf.range((784 // n_rows) * BATCH_SIZE))[:BATCH_SIZE])
        output = lib.ops.gru.GRU('Discriminator.GRU_{}'.format(1), 1, 256, output)
        outputs.append(output)
    return outputs