This article collects typical usage examples of the Python function tensorflow.random_uniform. If you have been wondering what random_uniform does, how to call it, and what real-world usage looks like, the curated code examples below should help.
The following shows 15 code examples of random_uniform, ordered by popularity.
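Before the examples, here is a minimal, self-contained sketch of the function itself (TensorFlow 1.x graph mode; the shape, bounds, and seed below are illustrative choices, not taken from any example that follows):

import tensorflow as tf  # TensorFlow 1.x

# Draw a [3, 4] float32 tensor uniformly from the half-open range [-1.0, 1.0).
x = tf.random_uniform([3, 4], minval=-1.0, maxval=1.0, dtype=tf.float32, seed=42)

with tf.Session() as sess:
    print(sess.run(x))  # re-running produces a fresh draw from the same op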
Example 1: __init__
def __init__(self, args):
    with tf.device(args.device):
        def circle(x):
            spherenet = tf.square(x)
            spherenet = tf.reduce_sum(spherenet, 1)
            lam = tf.sqrt(spherenet)
            return x / tf.reshape(lam, [int(lam.get_shape()[0]), 1])

        def modes(x):
            shape = x.get_shape()
            return tf.round(x * 2) / 2.0  # + tf.random_normal(shape, 0, 0.04)

        if args.distribution == 'circle':
            x = tf.random_normal([args.batch_size, 2])
            x = circle(x)
        elif args.distribution == 'modes':
            x = tf.random_uniform([args.batch_size, 2], -1, 1)
            x = modes(x)
        elif args.distribution == 'modal-gaussian':
            x = tf.random_uniform([args.batch_size, 2], -1, 1)
            y = tf.random_normal([args.batch_size, 2], stddev=0.04, mean=0.15)
            x = tf.round(x) + y
        elif args.distribution == 'sin':
            x = tf.random_uniform((1, args.batch_size), -10.5, 10.5)
            x = tf.transpose(x)
            r_data = tf.random_normal((args.batch_size, 1), mean=0, stddev=0.1)
            xy = tf.sin(0.75 * x) * 7.0 + x * 0.5 + r_data * 1.0
            x = tf.concat([xy, x], 1) / 16.0
        elif args.distribution == 'static-point':
            x = tf.ones([args.batch_size, 2])

        self.x = x
        self.xy = tf.zeros_like(self.x)
Example 2: testDiscretizedMixLogisticLoss
def testDiscretizedMixLogisticLoss(self):
    batch = 2
    height = 4
    width = 4
    channels = 3
    num_mixtures = 5
    logits = tf.concat(  # assign all probability mass to first component
        [tf.ones([batch, height, width, 1]) * 1e8,
         tf.zeros([batch, height, width, num_mixtures - 1])],
        axis=-1)
    locs = tf.random_uniform([batch, height, width, num_mixtures * 3],
                             minval=-.9, maxval=.9)
    log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3],
                                   minval=-1., maxval=1.)
    coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3]))
    pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1)

    # Test labels that don't satisfy edge cases where 8-bit value is 0 or 255.
    labels = tf.random_uniform([batch, height, width, channels],
                               minval=-.9, maxval=.9)
    locs_0 = locs[..., :3]
    log_scales_0 = log_scales[..., :3]
    centered_labels = labels - locs_0
    inv_stdv = tf.exp(-log_scales_0)
    plus_in = inv_stdv * (centered_labels + 1. / 255.)
    min_in = inv_stdv * (centered_labels - 1. / 255.)
    cdf_plus = tf.nn.sigmoid(plus_in)
    cdf_min = tf.nn.sigmoid(min_in)
    expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1)

    actual_loss = common_layers.discretized_mix_logistic_loss(
        pred=pred, labels=labels)
    actual_loss_val, expected_loss_val = self.evaluate(
        [actual_loss, expected_loss])
    self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5)
Example 3: __init__
def __init__(self, dim_image, n_words, dim_hidden, batch_size, n_lstm_steps,
             drop_out_rate, bias_init_vector=None):
    self.dim_image = dim_image
    self.n_words = n_words
    self.dim_hidden = dim_hidden
    self.batch_size = batch_size
    self.n_lstm_steps = n_lstm_steps
    self.drop_out_rate = drop_out_rate

    with tf.device("/gpu:2"):
        self.Wemb = tf.Variable(tf.random_uniform([n_words, dim_hidden], -0.1, 0.1), name='Wemb')

    # self.lstm1 = rnn_cell.BasicLSTMCell(dim_hidden)
    # self.lstm2 = rnn_cell.BasicLSTMCell(dim_hidden)
    self.lstm1 = rnn_cell.LSTMCell(self.dim_hidden, self.dim_hidden, use_peepholes=True)
    self.lstm1_dropout = rnn_cell.DropoutWrapper(self.lstm1, output_keep_prob=1 - self.drop_out_rate)
    self.lstm2 = rnn_cell.LSTMCell(self.dim_hidden, self.dim_hidden, use_peepholes=True)
    self.lstm2_dropout = rnn_cell.DropoutWrapper(self.lstm2, output_keep_prob=1 - self.drop_out_rate)

    # W is Weight, b is Bias
    self.encode_image_W = tf.Variable(tf.random_uniform([dim_image, dim_hidden], -0.1, 0.1), name='encode_image_W')
    self.encode_image_b = tf.Variable(tf.zeros([dim_hidden]), name='encode_image_b')
    self.embed_word_W = tf.Variable(tf.random_uniform([dim_hidden, n_words], -0.1, 0.1), name='embed_word_W')
    if bias_init_vector is not None:
        self.embed_word_b = tf.Variable(bias_init_vector.astype(np.float32), name='embed_word_b')
    else:
        self.embed_word_b = tf.Variable(tf.zeros([n_words]), name='embed_word_b')
Example 4: denseNet
def denseNet(self, hidden=20, depth=3, act=tf.nn.tanh, dropout=True, norm=None):
    if hidden > 100:
        print("WARNING: denseNet uses quadratic mem for " + str(hidden))
    if depth < 3:
        print("WARNING: did you mean to use Fully connected layer 'dense'? Expecting depth>3 vs " + str(depth))
    inputs = self.last_layer
    inputs_width = self.last_width
    width = hidden
    while depth > 0:
        with tf.name_scope('DenNet_{:d}'.format(width)) as scope:
            print("dense width ", inputs_width, "x", width)
            nr = len(self.layers)
            weights = tf.Variable(tf.random_uniform([inputs_width, width], minval=-1. / width, maxval=1. / width),
                                  name="weights")
            bias = tf.Variable(tf.random_uniform([width], minval=-1. / width, maxval=1. / width),
                               name="bias")  # auto nr + context
            dense1 = tf.matmul(inputs, weights, name='dense_' + str(nr)) + bias
            tf.summary.histogram('dense_' + str(nr), dense1)
            tf.summary.histogram('dense_' + str(nr) + '/sparsity', tf.nn.zero_fraction(dense1))
            tf.summary.histogram('weights_' + str(nr), weights)
            tf.summary.histogram('weights_' + str(nr) + '/sparsity', tf.nn.zero_fraction(weights))
            tf.summary.histogram('bias_' + str(nr), bias)
            if act: dense1 = act(dense1)
            if norm: dense1 = self.norm(dense1, lsize=1)  # SHAPE!
            if dropout: dense1 = tf.nn.dropout(dense1, self.keep_prob)
            self.add(dense1)
            self.last_width = width
            # Fixed: tf.concat takes the value list first in TF >= 1.0
            # (the original `tf.concat(1, [inputs, dense1])` is pre-1.0 syntax,
            # inconsistent with the tf.summary calls above).
            inputs = tf.concat([inputs, dense1], 1)
            inputs_width += width
            depth = depth - 1
    self.last_width = width
Example 5: test_get_expected_feature_map_shapes_with_inception_v3
def test_get_expected_feature_map_shapes_with_inception_v3(self):
    image_features = {
        'Mixed_5d': tf.random_uniform([4, 35, 35, 256], dtype=tf.float32),
        'Mixed_6e': tf.random_uniform([4, 17, 17, 576], dtype=tf.float32),
        'Mixed_7c': tf.random_uniform([4, 8, 8, 1024], dtype=tf.float32)
    }
    feature_maps = feature_map_generators.multi_resolution_feature_maps(
        feature_map_layout=INCEPTION_V3_LAYOUT,
        depth_multiplier=1,
        min_depth=32,
        insert_1x1_conv=True,
        image_features=image_features)
    expected_feature_map_shapes = {
        'Mixed_5d': (4, 35, 35, 256),
        'Mixed_6e': (4, 17, 17, 576),
        'Mixed_7c': (4, 8, 8, 1024),
        'Mixed_7c_2_Conv2d_3_3x3_s2_512': (4, 4, 4, 512),
        'Mixed_7c_2_Conv2d_4_3x3_s2_256': (4, 2, 2, 256),
        'Mixed_7c_2_Conv2d_5_3x3_s2_128': (4, 1, 1, 128)}
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
        sess.run(init_op)
        out_feature_maps = sess.run(feature_maps)
        out_feature_map_shapes = dict(
            (key, value.shape) for key, value in out_feature_maps.items())
        self.assertDictEqual(out_feature_map_shapes, expected_feature_map_shapes)
Example 6: __init__
def __init__(self, config):
    # NOTE: this example uses the pre-1.0 TensorFlow API throughout
    # (tf.split(axis, num, value), tf.nn.rnn, tf.*_summary).
    self.config = config
    self.input = tf.placeholder('int32', [self.config.batch_size, config.max_seq_len], name='input')
    self.labels = tf.placeholder('int64', [self.config.batch_size], name='labels')
    self.labels_one_hot = tf.one_hot(indices=self.labels,
                                     depth=config.output_dim,
                                     on_value=1.0,
                                     off_value=0.0,
                                     axis=-1)
    self.gru = GRUCell(config.hidden_state_dim)
    embeddings_we = tf.get_variable('word_embeddings',
                                    initializer=tf.random_uniform([config.vocab_size, config.embedding_dim], -1.0, 1.0))
    self.emb = embed_input = tf.nn.embedding_lookup(embeddings_we, self.input)
    inputs = [tf.squeeze(i, squeeze_dims=[1]) for i in tf.split(1, config.max_seq_len, embed_input)]
    outputs, last_slu_state = tf.nn.rnn(
        cell=self.gru,
        inputs=inputs,
        dtype=tf.float32,)
    w_project = tf.get_variable('project2labels',
                                initializer=tf.random_uniform([config.hidden_state_dim, config.output_dim], -1.0, 1.0))
    self.logits = logits_bo = tf.matmul(last_slu_state, w_project)
    tf.histogram_summary('logits', logits_bo)
    self.probabilities = tf.nn.softmax(logits_bo)
    self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits_bo, self.labels_one_hot))
    self.predict = tf.nn.softmax(logits_bo)

    # TensorBoard
    self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(self.predict, 1), self.labels), 'float32'),
                                   name='accuracy')
    tf.scalar_summary('CCE loss', self.loss)
    tf.scalar_summary('Accuracy', self.accuracy)
    self.tb_info = tf.merge_all_summaries()
Example 7: test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1
def test_get_expected_feature_map_shapes_with_embedded_ssd_mobilenet_v1(
        self, use_keras):
    image_features = {
        'Conv2d_11_pointwise': tf.random_uniform([4, 16, 16, 512],
                                                 dtype=tf.float32),
        'Conv2d_13_pointwise': tf.random_uniform([4, 8, 8, 1024],
                                                 dtype=tf.float32),
    }
    feature_map_generator = self._build_feature_map_generator(
        feature_map_layout=EMBEDDED_SSD_MOBILENET_V1_LAYOUT,
        use_keras=use_keras
    )
    feature_maps = feature_map_generator(image_features)
    expected_feature_map_shapes = {
        'Conv2d_11_pointwise': (4, 16, 16, 512),
        'Conv2d_13_pointwise': (4, 8, 8, 1024),
        'Conv2d_13_pointwise_2_Conv2d_2_3x3_s2_512': (4, 4, 4, 512),
        'Conv2d_13_pointwise_2_Conv2d_3_3x3_s2_256': (4, 2, 2, 256),
        'Conv2d_13_pointwise_2_Conv2d_4_2x2_s2_256': (4, 1, 1, 256)}
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
        sess.run(init_op)
        out_feature_maps = sess.run(feature_maps)
        out_feature_map_shapes = dict(
            (key, value.shape) for key, value in out_feature_maps.items())
        self.assertDictEqual(expected_feature_map_shapes, out_feature_map_shapes)
Example 8: testUnit4
def testUnit4(self):
    x1 = tf.random_uniform([1, 19, 19, 1024])
    x2 = tf.random_uniform([1, 19, 19, 1024])
    x1, x2 = revnet.unit(x1, x2, block_num=4, depth=416,
                         num_layers=1, stride=2)
    self.assertEquals(x1.get_shape().as_list(), [1, 10, 10, 1664])
    self.assertEquals(x2.get_shape().as_list(), [1, 10, 10, 1664])
Example 9: testUnit3D
def testUnit3D(self):
    x1 = tf.random_uniform([4, 74, 74, 74, 256])
    x2 = tf.random_uniform([4, 74, 74, 74, 256])
    x1, x2 = revnet.unit(x1, x2, block_num=5, depth=128,
                         num_layers=1, dim='3d', stride=2)
    self.assertEquals(x1.get_shape().as_list(), [4, 37, 37, 37, 512])
    self.assertEquals(x2.get_shape().as_list(), [4, 37, 37, 37, 512])
Example 10: testUnit1
def testUnit1(self):
    x1 = tf.random_uniform([4, 74, 74, 256])
    x2 = tf.random_uniform([4, 74, 74, 256])
    x1, x2 = revnet.unit(x1, x2, block_num=1, depth=64,
                         first_batch_norm=True, num_layers=1)
    self.assertEquals(x1.get_shape().as_list(), [4, 74, 74, 256])
    self.assertEquals(x2.get_shape().as_list(), [4, 74, 74, 256])
Example 11: testUnit3
def testUnit3(self):
    x1 = tf.random_uniform([1, 37, 37, 512])
    x2 = tf.random_uniform([1, 37, 37, 512])
    x1, x2 = revnet.unit(x1, x2, block_num=3, depth=256,
                         num_layers=10, stride=2)
    self.assertEquals(x1.get_shape().as_list(), [1, 19, 19, 1024])
    self.assertEquals(x2.get_shape().as_list(), [1, 19, 19, 1024])
Example 12: get_online_sequences
def get_online_sequences(sequence_length, batch_size):
    """Gets a tensor which constantly produces new random examples.

    Args:
      sequence_length: total length of the sequences.
      batch_size: how many at a time.

    Returns:
      (data, targets): data is `[sequence_length, batch_size, 2]` and targets
        are `[batch_size]`.
    """
    # getting the random channel is easy
    random_data = tf.random_uniform([sequence_length, batch_size, 1],
                                    minval=0.0, maxval=1.0)
    # now we need a random marker in each half of the data
    random_index_1 = tf.random_uniform([1, batch_size], minval=0,
                                       maxval=sequence_length // 2,
                                       dtype=tf.int32)
    random_index_2 = tf.random_uniform([1, batch_size], minval=0,
                                       maxval=sequence_length // 2,
                                       dtype=tf.int32)
    markers = tf.concat(axis=2, values=[tf.one_hot(random_index_1, sequence_length // 2),
                                        tf.one_hot(random_index_2, sequence_length // 2)])
    markers = tf.transpose(markers)
    targets = tf.reduce_sum(random_data * markers, axis=0)
    return tf.concat(axis=2, values=[random_data, markers]), tf.squeeze(targets)
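A hypothetical call, with shapes taken directly from the docstring above (sequence_length=100 and batch_size=32 are arbitrary illustrative values):

data, targets = get_online_sequences(sequence_length=100, batch_size=32)
# data:    [100, 32, 2] -- the random value channel plus the marker channel
# targets: [32]         -- per-sequence sum of the two marked random values
with tf.Session() as sess:
    d, t = sess.run([data, targets])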
Example 13: benchmarkEagerLinearRegression
def benchmarkEagerLinearRegression(self):
    num_batches = 200
    batch_size = 64
    dataset = linear_regression.synthetic_dataset(
        w=tf.random_uniform([3, 1]),
        b=tf.random_uniform([1]),
        noise_level=0.01,
        batch_size=batch_size,
        num_batches=num_batches)
    burn_in_dataset = dataset.take(10)

    model = linear_regression.LinearModel()

    with tf.device(device()):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)

        # Perform burn-in.
        linear_regression.fit(model, burn_in_dataset, optimizer)

        start_time = time.time()
        linear_regression.fit(model, dataset, optimizer)
        wall_time = time.time() - start_time

    examples_per_sec = num_batches * batch_size / wall_time
    self.report_benchmark(
        name="eager_train_%s" % ("gpu" if tfe.num_gpus() > 0 else "cpu"),
        iters=num_batches,
        extras={"examples_per_sec": examples_per_sec},
        wall_time=wall_time)
Example 14: __init__
def __init__(self, dh, dq, da, di, max_q, Nq, Na, cell='rnn', trainable_embeddings=True):
    self.dh = dh
    self.dq = dq
    self.da = da
    self.di = di
    self.max_q = max_q
    self.Nq = Nq
    self.Na = Na
    self.cell = cell

    with tf.device('/cpu:0'):
        self.qemb_W = tf.get_variable('qemb_w',
                                      initializer=tf.random_uniform([self.Nq, self.dq], -0.1, 0.1),
                                      trainable=trainable_embeddings)
    self.aemb_W = tf.get_variable(name='aemb_w',
                                  initializer=tf.random_uniform([self.dh, self.Na], -0.1, 0.1))
    self.aemb_b = tf.get_variable(name='aemb_b',
                                  initializer=tf.zeros([self.Na]))
    self.Wi = tf.get_variable(name='Wi', shape=[self.di, self.dq],
                              initializer=tf.contrib.layers.xavier_initializer())
    self.bi = tf.get_variable(name='bi',
                              initializer=tf.zeros([self.dq]))
    if self.cell == 'rnn':
        # Fixed: tf.nn.rnn_cell.RNNCell is abstract; BasicRNNCell is the
        # concrete vanilla cell.
        self.recur = tf.nn.rnn_cell.BasicRNNCell(self.dh)
    elif self.cell == 'lstm':
        self.recur = tf.nn.rnn_cell.LSTMCell(self.dh)
    elif self.cell == 'gru':
        self.recur = tf.nn.rnn_cell.GRUCell(self.dh)
    else:
        raise NotImplementedError
Example 15: input_fn
def input_fn(params):
    """Generated input_fn for the given epoch."""
    batch_size = (params["batch_size"] if is_training else
                  params["eval_batch_size"] or params["batch_size"])
    num_users = params["num_users"]
    num_items = params["num_items"]

    users = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
                              maxval=num_users)
    items = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
                              maxval=num_items)

    if is_training:
        labels = tf.random_uniform([batch_size], dtype=tf.int32, minval=0,
                                   maxval=2)
        data = {
            movielens.USER_COLUMN: users,
            movielens.ITEM_COLUMN: items,
        }, labels
    else:
        dupe_mask = tf.cast(tf.random_uniform([batch_size], dtype=tf.int32,
                                              minval=0, maxval=2), tf.bool)
        data = {
            movielens.USER_COLUMN: users,
            movielens.ITEM_COLUMN: items,
            rconst.DUPLICATE_MASK: dupe_mask,
        }

    dataset = tf.data.Dataset.from_tensors(data).repeat(
        SYNTHETIC_BATCHES_PER_EPOCH)
    dataset = dataset.prefetch(32)
    return dataset
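A hypothetical invocation sketch: the params keys match those the function reads, but the concrete values and the enclosing is_training flag are assumptions for illustration:

params = {"batch_size": 256, "eval_batch_size": 1024,
          "num_users": 6040, "num_items": 3706}
dataset = input_fn(params)  # is_training is bound in the enclosing scope
# In training mode each element is a (features, labels) pair:
features, labels = dataset.make_one_shot_iterator().get_next()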