This article collects typical usage examples of Python's tensorflow.ones_like. If you have been wondering what tensorflow.ones_like does or how to use it, the curated code examples below may help. You can also explore further usage examples from the tensorflow module it belongs to.
The following shows 15 code examples of tensorflow.ones_like, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
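
Before the full examples, here is a minimal sketch of what tf.ones_like does, written for TensorFlow 1.x graph mode to match the style used throughout the examples below:

import tensorflow as tf

# tf.ones_like returns a tensor of ones with the same shape and dtype
# as its input (the dtype can be overridden via the dtype argument).
x = tf.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64)
ones = tf.ones_like(x)                      # shape (2, 3), dtype int64
ones_f = tf.ones_like(x, dtype=tf.float32)  # same shape, float32

with tf.Session() as sess:
    print(sess.run(ones))    # [[1 1 1], [1 1 1]]
    print(sess.run(ones_f))  # [[1. 1. 1.], [1. 1. 1.]]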

Example 1: call
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def call(self, x):
    if (self.size is None) or (self.mode == 'sum'):
        self.size = int(x.shape[-1])
    # Inverse frequencies for the sinusoidal encoding: 1 / 10000^(2j / size).
    position_j = 1. / \
        K.pow(10000., 2 * K.arange(self.size / 2, dtype='float32') / self.size)
    position_j = K.expand_dims(position_j, 0)
    # Position indices 0..seq_len-1, built as a cumulative sum over ones.
    position_i = tf.cumsum(K.ones_like(x[:, :, 0]), 1) - 1
    position_i = K.expand_dims(position_i, 2)
    position_ij = K.dot(position_i, position_j)
    outputs = K.concatenate(
        [K.cos(position_ij), K.sin(position_ij)], 2)
    if self.mode == 'sum':
        if self.scale:
            outputs = outputs * outputs ** 0.5
        return x + outputs
    elif self.mode == 'concat':
        return K.concatenate([outputs, x], 2)
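
The tf.cumsum(K.ones_like(...)) - 1 idiom above generates per-timestep position indices without requiring a statically known sequence length. A minimal standalone sketch of the trick, assuming TensorFlow 1.x and a made-up (batch, time, features) shape:

import tensorflow as tf

# x stands in for a (batch, time, features) activation tensor.
x = tf.zeros([2, 4, 8])
# Cumulative sums over ones give 1, 2, 3, ... per timestep;
# subtracting 1 yields 0-based positions, one row per batch element.
positions = tf.cumsum(tf.ones_like(x[:, :, 0]), axis=1) - 1

with tf.Session() as sess:
    print(sess.run(positions))  # [[0. 1. 2. 3.], [0. 1. 2. 3.]]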

Example 2: build_inputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def build_inputs(self):
    if self.mode == "encode":
        # Encode mode doesn't read from disk, so defer to parent.
        return super(SkipThoughtsModel, self).build_inputs()
    else:
        # Replace disk I/O with random Tensors.
        self.encode_ids = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.decode_pre_ids = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.decode_post_ids = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        # All-ones masks mark every token position as valid.
        self.encode_mask = tf.ones_like(self.encode_ids)
        self.decode_pre_mask = tf.ones_like(self.decode_pre_ids)
        self.decode_post_mask = tf.ones_like(self.decode_post_ids)

Example 3: build_inputs
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def build_inputs(self):
    if self.mode == "inference":
        # Inference mode doesn't read from disk, so defer to parent.
        return super(ShowAndTellModel, self).build_inputs()
    else:
        # Replace disk I/O with random Tensors.
        self.images = tf.random_uniform(
            shape=[self.config.batch_size, self.config.image_height,
                   self.config.image_width, 3],
            minval=-1,
            maxval=1)
        self.input_seqs = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        self.target_seqs = tf.random_uniform(
            [self.config.batch_size, 15],
            minval=0,
            maxval=self.config.vocab_size,
            dtype=tf.int64)
        # An all-ones mask marks every input token as valid.
        self.input_mask = tf.ones_like(self.input_seqs)
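
In both build_inputs examples above, tf.ones_like turns a batch of token ids into an all-valid sequence mask of the same shape and dtype. A minimal sketch of that idiom, assuming TensorFlow 1.x and made-up sizes:

import tensorflow as tf

ids = tf.random_uniform([4, 15], minval=0, maxval=1000, dtype=tf.int64)
# Same shape and dtype as ids; a 1 at every position means
# "no padding anywhere", which is what these test inputs need.
mask = tf.ones_like(ids)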

Example 4: _std
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def _std(self):
    """Computes the current estimate of the standard deviation.

    Note that the standard deviation is not defined until at least two
    samples were seen.

    Returns:
      Tensor of the current standard deviation.
    """
    variance = tf.cond(
        self._count > 1,
        lambda: self._var_sum / tf.cast(self._count - 1, tf.float32),
        lambda: tf.ones_like(self._var_sum) * float('nan'))
    # The epsilon corrects for small negative variance values caused by
    # the algorithm. It was empirically chosen to work with all environments
    # tested.
    return tf.sqrt(variance + 1e-4)
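
Here tf.ones_like(self._var_sum) * float('nan') produces a NaN tensor shaped like the variance accumulator, which tf.cond returns as the "not yet defined" branch. A standalone sketch of that pattern, assuming TensorFlow 1.x and hypothetical values:

import tensorflow as tf

var_sum = tf.constant([0.5, 2.0])
count = tf.constant(1)
# With fewer than two samples the sample variance is undefined,
# so fall back to a NaN tensor of the same shape.
variance = tf.cond(
    count > 1,
    lambda: var_sum / tf.cast(count - 1, tf.float32),
    lambda: tf.ones_like(var_sum) * float('nan'))

with tf.Session() as sess:
    print(sess.run(variance))  # [nan nan]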

Example 5: _load_data_graph
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input
    placeholders, label (target tip summary) placeholders, and the
    per-timestep loss weights of the Seq2Seq model.
    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        # review input - both original and reversed
        self.enc_inp_fwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        self.enc_inp_bwd = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                            for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # uniform per-timestep loss weights
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend a "GO" token and drop the final
        # token of the target sequence.
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=tf.int32, name="GO")]
                        + self.labels[:-1])
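
The tf.ones_like(labels_t, dtype=tf.float32) list above supplies the per-timestep weights expected by TF1's legacy sequence loss; all-ones weights mean every timestep contributes equally. A minimal graph-construction sketch, assuming TensorFlow 1.x and a hypothetical sequence length:

import tensorflow as tf

seq_length = 5
labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
          for t in range(seq_length)]
# One float32 weight per label position; uniform weighting means
# no timestep is masked out of the loss.
weights = [tf.ones_like(labels_t, dtype=tf.float32) for labels_t in labels]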

Example 6: _load_data_graph
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input
    placeholders, label (target tip summary) placeholders, and the
    per-timestep loss weights of the Seq2Seq model.
    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        self.enc_inp = [tf.placeholder(tf.int32, shape=(None,), name="input%i" % t)
                        for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,), name="labels%i" % t)
                       for t in range(self.seq_length)]
        # uniform per-timestep loss weights
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend a "GO" token and drop the final
        # token of the target sequence.
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=tf.int32, name="GO")]
                        + self.labels[:-1])

Example 7: _load_data_graph
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def _load_data_graph(self):
    """
    Loads the data graph consisting of the encoder and decoder input
    placeholders, label (target tip summary) placeholders, and the
    per-timestep loss weights of the Seq2Seq model.
    :return: None
    """
    # input
    with tf.variable_scope("train_test", reuse=True):
        self.enc_inp = [tf.placeholder(tf.int32, shape=(None,),
                                       name="input%i" % t)
                        for t in range(self.seq_length)]
        # desired output
        self.labels = [tf.placeholder(tf.int32, shape=(None,),
                                      name="labels%i" % t)
                       for t in range(self.seq_length)]
        # uniform per-timestep loss weights
        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]
        # Decoder input: prepend a "GO" token and drop the final
        # token of the target sequence.
        self.dec_inp = ([tf.zeros_like(self.labels[0], dtype=tf.int32, name="GO")]
                        + self.labels[:-1])

Example 8: rank_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def rank_loss(sentence_emb, image_emb, margin=0.2):
    """Experimental rank loss, thanks to kkurach@ for the code."""
    with tf.name_scope("rank_loss"):
        # Normalize first as this is assumed in cosine similarity later.
        sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
        image_emb = tf.nn.l2_normalize(image_emb, 1)
        # Both sentence_emb and image_emb have size [batch, depth].
        scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
        diagonal = tf.diag_part(scores)  # [batch]
        cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
        cost_im = tf.maximum(
            0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
        # Clear diagonals.
        batch_size = tf.shape(sentence_emb)[0]
        empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
        cost_s *= empty_diagonal_mat
        cost_im *= empty_diagonal_mat
        return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
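
tf.ones_like(cost_s) - tf.eye(batch_size) builds a mask with zeros on the diagonal, removing each item's similarity with itself from the loss. A standalone sketch of that step, assuming TensorFlow 1.x:

import tensorflow as tf

scores = tf.constant([[0.9, 0.1],
                      [0.2, 0.8]])
# An all-ones matrix minus the identity leaves zeros on the diagonal.
mask = tf.ones_like(scores) - tf.eye(2)
off_diagonal = scores * mask

with tf.Session() as sess:
    print(sess.run(off_diagonal))  # [[0.  0.1], [0.2 0. ]]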

Example 9: generator_loss
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def generator_loss(loss_func, fake):
    fake_loss = 0
    if loss_func == 'wgan-gp' or loss_func == 'wgan-lp':
        fake_loss = -tf.reduce_mean(fake)
    if loss_func == 'lsgan':
        fake_loss = tf.reduce_mean(tf.square(fake - 1.0))
    if loss_func == 'gan' or loss_func == 'dragan':
        # The generator wants the discriminator to label its fakes as
        # real, hence ones_like as the target labels.
        fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(fake), logits=fake))
    if loss_func == 'hinge':
        fake_loss = -tf.reduce_mean(fake)

    loss = fake_loss
    return loss
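
A quick usage sketch of the generator_loss defined above, with fake_logits standing in for discriminator outputs on generated samples (TensorFlow 1.x; the values are made up):

import tensorflow as tf

fake_logits = tf.constant([[2.0], [-1.0], [0.5]])
# 'gan' selects sigmoid cross-entropy against all-ones labels.
g_loss = generator_loss('gan', fake_logits)

with tf.Session() as sess:
    print(sess.run(g_loss))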

Example 10: test_forward_tensor
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def test_forward_tensor(func, wrt, *args):
    """Test gradients of functions with TFE signatures."""
    def tangent_func():
        df = jvp(func, wrt=wrt, optimized=True, verbose=True)
        # Seed the forward-mode derivative with all-ones tangent vectors.
        args_ = args + tuple(tf.ones_like(args[i]) for i in wrt)
        return tensors_to_numpy(df(*args_))

    def reference_func():
        return tensors_to_numpy(tfe.gradients_function(func, params=wrt)(*args))

    def backup_reference_func():
        func_ = as_numpy_sig(func)
        args_ = tensors_to_numpy(args)
        return utils.numeric_grad(utils.numeric_grad(func_))(*args_)

    # TODO: Should results really be that far off?
    utils.assert_result_matches_reference(
        tangent_func, reference_func, backup_reference_func,
        tolerance=1e-4)

Example 11: compute_interpolation_weights
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def compute_interpolation_weights(inputs, keypoints, lengths):
    """Computes weights for PWL calibration.

    Args:
      inputs: Tensor of shape `(D0, D1, ..., DN, 1)` which represents inputs
        to the pwl function. A typical shape is `(batch_size, 1)`.
      keypoints: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
        left keypoints of pieces of the piecewise linear function along the
        X axis.
      lengths: Rank-1 tensor of shape `(num_keypoints - 1)` which represents
        lengths of pieces of the piecewise linear function along the X axis.

    Returns:
      Interpolation weights tensor of shape `(D0, D1, ..., DN, num_keypoints)`.
    """
    weights = (inputs - keypoints) / lengths
    weights = tf.minimum(weights, 1.0)
    weights = tf.maximum(weights, 0.0)
    # Prepend 1.0 at the beginning to add bias unconditionally.
    return tf.concat([tf.ones_like(inputs), weights], axis=-1)
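
A concrete run of the function above with made-up keypoints, assuming TensorFlow 1.x; each piece spans one unit along the X axis:

import tensorflow as tf

inputs = tf.constant([[0.5], [2.5]])      # shape (2, 1)
keypoints = tf.constant([0.0, 1.0, 2.0])  # left keypoint of each piece
lengths = tf.constant([1.0, 1.0, 1.0])    # length of each piece
weights = compute_interpolation_weights(inputs, keypoints, lengths)

with tf.Session() as sess:
    # Row 0: [1.0, 0.5, 0.0, 0.0] -- bias, then clipped per-piece progress.
    # Row 1: [1.0, 1.0, 1.0, 0.5]
    print(sess.run(weights))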

Example 12: prepVars
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def prepVars(self, f: int, U: List[Tensor],
             X: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
    if f == 0:
        U1 = U[1]
        alpha1 = self.noiseDistribution.tau
        # Unit precision for every row of X.
        alpha = tf.ones_like(X[:, 0])
    elif f == 1:
        U1 = U[0]
        # Unit precision along the other dimension.
        alpha1 = tf.ones_like(X[:, 0])
        alpha = self.noiseDistribution.tau
        X = tf.transpose(X)
    U1T = tf.transpose(U1)
    A = tf.matmul(X, U1T*alpha1[..., None])
    B = tf.matmul(U1*alpha1, U1T)
    return(A, B, alpha)

Example 13: test_update
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def test_update(device, f, updateType, dtype):
    npdtype = dtype.as_numpy_dtype
    M, K, tau = (20, 30), 3, 0.1
    npU = (np.random.normal(size=(K, M[0])).astype(npdtype),
           np.random.normal(size=(K, M[1])).astype(npdtype))
    U = (tf.constant(npU[0]), tf.constant(npU[1]))
    npnoise = np.random.normal(size=M).astype(npdtype)
    npdata = np.dot(npU[0].T, npU[1]) + npnoise
    data = tf.constant(npdata, dtype=dtype)

    lh = Normal2dLikelihood(M=M, K=K, tau=tau, updateType=updateType)
    lh.init(data=data)
    lh.noiseDistribution.update = MagicMock()
    # Stub the residuals with a deterministic all-ones tensor.
    residuals = tf.ones_like(data)
    lh.residuals = MagicMock(return_value=residuals)

    lh.update(U, data)

    if updateType == UpdateType.ALL:
        lh.residuals.assert_called_once()
        lh.noiseDistribution.update.assert_called_once()
    else:
        lh.residuals.assert_not_called()
        lh.noiseDistribution.update.assert_not_called()
    tf.reset_default_graph()

Example 14: fitGamma
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def fitGamma(cls, tau):
    # Moment-based initialization of the Gamma shape parameter.
    alpha = 0.5/(tf.log(tf.reduce_mean(tau))
                 + 1e-6  # added due to numerical instability
                 - tf.reduce_mean(tf.log(tau)))
    # Fixed number of Newton-style refinements; tf.polygamma with an
    # all-ones order tensor evaluates the trigamma function.
    for i in range(20):
        alpha = (1. / (1./alpha
                       + (tf.reduce_mean(tf.log(tau))
                          - tf.log(tf.reduce_mean(tau))
                          + tf.log(alpha)
                          - tf.digamma(alpha))
                       / (alpha**2*(1./alpha
                                    - tf.polygamma(tf.ones_like(alpha),
                                                   alpha)))))
    beta = alpha/tf.reduce_mean(tau)
    return(alpha, beta)
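
tf.ones_like(alpha) here supplies the order argument of tf.polygamma elementwise: order 1 selects the trigamma function, and the order tensor must match alpha's shape and dtype. A standalone sketch, assuming TensorFlow 1.x:

import tensorflow as tf

alpha = tf.constant([0.5, 1.0, 2.0])
# An order tensor of ones, shaped like alpha, evaluates trigamma per element.
trigamma = tf.polygamma(tf.ones_like(alpha), alpha)

with tf.Session() as sess:
    print(sess.run(trigamma))  # approximately [4.9348 1.6449 0.6449]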

Example 15: cond
# Required import: import tensorflow [as alias]
# Or: from tensorflow import ones_like [as alias]
def cond(self) -> CenNnFullyElasticNetCond:
    b = self.b
    mu = self.mu
    tau = self.tau
    betaExponential = self.betaExponential
    tauLomax = self.tauLomax
    # Broadcast the scalar parameters to the shape of tauLomax.
    b = tf.ones_like(tauLomax)*b
    mu = tf.ones_like(tauLomax)*mu
    tau = tf.ones_like(tauLomax)*tau
    betaExponential = tf.ones_like(tauLomax)*betaExponential
    name = self.name + "Cond"
    properties = Properties(name=name,
                            drawType=self.drawType,
                            updateType=self.updateType,
                            persistent=False)
    cond = CenNnFullyElasticNetCond(b=b, mu=mu, tau=tau,
                                    betaExponential=betaExponential,
                                    beta=1./tauLomax,
                                    properties=properties)
    return(cond)
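
Multiplying tf.ones_like(tauLomax) by a scalar parameter, as done above, materializes that scalar as a full tensor of tauLomax's shape and dtype. A minimal sketch of the idiom, assuming TensorFlow 1.x:

import tensorflow as tf

t = tf.zeros([2, 3])
scalar = tf.constant(0.7)
# Explicitly tile the scalar to t's shape; for float tensors this is
# equivalent to tf.fill(tf.shape(t), scalar).
expanded = tf.ones_like(t) * scalar

with tf.Session() as sess:
    print(sess.run(expanded))  # [[0.7 0.7 0.7], [0.7 0.7 0.7]]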