本文整理匯總了Python中tensorflow.keras.backend.variable方法的典型用法代碼示例。如果您正苦於以下問題:Python backend.variable方法的具體用法?Python backend.variable怎麽用?Python backend.variable使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類tensorflow.keras.backend
的用法示例。
在下文中一共展示了backend.variable方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_amplitude_to_decibel
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def test_amplitude_to_decibel():
    """Check kapre.backend_keras.amplitude_to_decibel against a NumPy reference."""
    from kapre.backend_keras import amplitude_to_decibel

    # Positive amplitudes spanning many orders of magnitude.
    x = np.array([[1e-20, 1e-5, 1e-3, 5e-2], [0.3, 1.0, 20.5, 9999]])
    amin = 1e-5
    dynamic_range = 80.0

    # NumPy reference: dB conversion, per-row peak normalization, range clipping.
    reference = 10 * np.log10(np.maximum(x, amin))
    reference = reference - np.max(reference, axis=(1,), keepdims=True)
    reference = np.maximum(reference, -1 * dynamic_range)

    result = amplitude_to_decibel(K.variable(x), amin, dynamic_range)
    assert np.allclose(K.eval(result), reference, atol=TOL)
示例2: build
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def build(self, input_shape):
    """Create the running-mean and sample-count state for this layer.

    Both are registered as non-trainable weights (rather than plain backend
    variables) so that they are saved and restored together with the model.
    """
    # Per-feature running mean, matching the input shape minus the batch dim.
    self.mean = self.add_weight(
        name='mean', shape=input_shape[1:], initializer='zeros', trainable=False)
    # Scalar count of samples folded into the mean so far.
    self.count = self.add_weight(
        name='count', shape=[1], initializer='zeros', trainable=False)
    super(MeanStream, self).build(input_shape)  # marks the layer as built
示例3: init_neurons
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def init_neurons(self, input_shape):
    """Allocate the state variables of this spiking layer.

    Creates the threshold, membrane potential and time variables, plus the
    refractory and spike-monitoring buffers when the configuration needs them.
    """
    from snntoolbox.bin.utils import get_log_keys, get_plot_keys

    output_shape = self.compute_output_shape(input_shape)
    self.v_thresh = k.variable(self._v_thresh)
    self.mem = k.variable(self.init_membrane_potential(output_shape))
    self.time = k.variable(self.dt)

    # To save memory and computations, allocate only where needed:
    if self.tau_refrac > 0:
        self.refrac_until = k.zeros(output_shape)

    monitor_keys = {'spiketrains', 'spikerates', 'correlation', 'spikecounts',
                    'hist_spikerates_activations', 'operations',
                    'synaptic_operations_b_t', 'neuron_operations_b_t',
                    'spiketrains_n_b_l_t'}
    # Only track spikes when some plot/log key actually consumes them.
    if monitor_keys & (get_plot_keys(self.config) | get_log_keys(self.config)):
        self.spiketrain = k.zeros(output_shape)
        self.last_spiketimes = k.variable(-np.ones(output_shape))
示例4: dice_weighted
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def dice_weighted(weights):
    """Build a class-weighted soft Dice loss.

    Parameters
    ----------
    weights : array-like
        Per-class weights applied to each class's Dice coefficient.

    Returns
    -------
    callable
        Loss function ``(y_true, y_pred) -> -weighted_dice``.
    """
    class_weights = K.variable(weights)

    def weighted_loss(y_true, y_pred, smooth=0.00001):
        axis = identify_axis(y_true.get_shape())
        overlap = K.sum(y_true * y_pred, axis=axis)
        truth_sum = K.sum(y_true, axis=axis)
        pred_sum = K.sum(y_pred, axis=axis)
        dice = ((2 * overlap) + smooth) / (truth_sum + pred_sum + smooth)
        # Negated so that maximizing Dice minimizes the loss.
        return -(dice * class_weights)

    return weighted_loss
#-----------------------------------------------------#
# Dice & Crossentropy loss #
#-----------------------------------------------------#
示例5: build
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def build(self, input_shape):
    """Create the real/imaginary DFT kernels and register them as weights.

    Expects input of shape (batch, n_ch, len_src); ``len_src`` may be None
    for variable-length input, in which case the frame count is deferred.
    """
    self.n_ch = input_shape[1]
    self.len_src = input_shape[2]
    self.is_mono = self.n_ch == 1
    self.ch_axis_idx = 1 if self.image_data_format == 'channels_first' else 3
    if self.len_src is not None:
        assert self.len_src >= self.n_dft, 'Hey! The input is too short!'
        self.n_frame = conv_output_length(self.len_src, self.n_dft, self.padding, self.n_hop)

    dft_real_kernels, dft_imag_kernels = backend.get_stft_kernels(self.n_dft)
    self.dft_real_kernels = K.variable(dft_real_kernels, dtype=K.floatx(), name="real_kernels")
    self.dft_imag_kernels = K.variable(dft_imag_kernels, dtype=K.floatx(), name="imag_kernels")
    # kernel shapes: presumably (filter_length, 1, input_dim, nb_filter) — TODO confirm
    if self.trainable_kernel:
        self.trainable_weights.append(self.dft_real_kernels)
        self.trainable_weights.append(self.dft_imag_kernels)
    else:
        self.non_trainable_weights.append(self.dft_real_kernels)
        self.non_trainable_weights.append(self.dft_imag_kernels)
    super(Spectrogram, self).build(input_shape)
示例6: build
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def build(self, input_shape):
    """Build the filterbank matrix ('mel' or 'log' init) and register it
    as a trainable or frozen weight depending on ``trainable_fb``."""
    if self.image_data_format == 'channels_first':
        self.n_ch, self.n_freq, self.n_time = (
            input_shape[1], input_shape[2], input_shape[3])
    else:
        self.n_freq, self.n_time, self.n_ch = (
            input_shape[1], input_shape[2], input_shape[3])

    if self.init == 'mel':
        fb = backend.filterbank_mel(
            sr=self.sr,
            n_freq=self.n_freq,
            n_mels=self.n_fbs,
            fmin=self.fmin,
            fmax=self.fmax,
        )
        self.filterbank = K.variable(fb.transpose(), dtype=K.floatx())
    elif self.init == 'log':
        fb = backend.filterbank_log(
            sr=self.sr,
            n_freq=self.n_freq,
            n_bins=self.n_fbs,
            bins_per_octave=self.bins_per_octave,
            fmin=self.fmin,
        )
        self.filterbank = K.variable(fb.transpose(), dtype=K.floatx())

    if self.trainable_fb:
        self.trainable_weights.append(self.filterbank)
    else:
        self.non_trainable_weights.append(self.filterbank)
    super(Filterbank, self).build(input_shape)
    self.built = True
示例7: __init__
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def __init__(self, learning_rate=0.001, beta_1=0.9, beta_2=0.999,
             epsilon=None, decay=0., amsgrad=False,
             model=None, zero_penalties=True, batch_size=32,
             total_iterations=0, total_iterations_wd=None,
             use_cosine_annealing=False, lr_multipliers=None,
             weight_decays=None, init_verbose=True,
             eta_min=0, eta_max=1, t_cur=0, name="AdamW", **kwargs):
    """Adam optimizer with decoupled weight decay and optional cosine annealing.

    Parameters (beyond standard Adam):
        model: model whose layers are scanned for weight-decay penalties.
        zero_penalties: whether to zero layer penalties when collecting decays.
        batch_size, total_iterations, total_iterations_wd: schedule bookkeeping
            for weight-decay normalization.
        use_cosine_annealing, eta_min, eta_max, t_cur: cosine lr schedule state.
        lr_multipliers: per-layer learning-rate multipliers.
        weight_decays: explicit {weight_name: decay} mapping; auto-derived from
            `model` when total_iterations > 1.
    """
    if total_iterations > 1:
        weight_decays = _init_weight_decays(model, zero_penalties,
                                            weight_decays)

    eta_t = kwargs.pop('eta_t', 1.)  # annealing multiplier, pop before super()
    super(AdamW, self).__init__(name, **kwargs)
    # 'lr' kwarg takes precedence over learning_rate for backward compatibility.
    self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
    self._set_hyper('decay', self._initial_decay)
    self._set_hyper('beta_1', beta_1)
    self._set_hyper('beta_2', beta_2)
    self.eta_min = K.constant(eta_min, name='eta_min')
    self.eta_max = K.constant(eta_max, name='eta_max')
    self.eta_t = K.variable(eta_t, dtype='float32', name='eta_t')
    self.t_cur = K.variable(t_cur, dtype='int64', name='t_cur')
    self.batch_size = batch_size
    self.total_iterations = total_iterations
    self.total_iterations_wd = total_iterations_wd or total_iterations
    self.lr_multipliers = lr_multipliers
    self.weight_decays = weight_decays or {}
    self.init_verbose = init_verbose
    self.use_cosine_annealing = use_cosine_annealing
    self.epsilon = epsilon or backend_config.epsilon()
    self.amsgrad = amsgrad
    _check_args(self, total_iterations, use_cosine_annealing, weight_decays)
    self._init_lr = kwargs.get('lr', learning_rate)  # to print lr_mult setup
    self._updates_processed = 0  # to track num calls to '_resource_apply_...'
    self._init_notified = False
    # NOTE: removed a duplicate `self._init_lr = kwargs.get('lr', learning_rate)`
    # that re-assigned the same value a second time.
示例8: __init__
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def __init__(self, nb_labels,
             weights=None,
             input_type='prob',
             dice_type='soft',
             approx_hard_max=True,
             vox_weights=None,
             crop_indices=None,
             re_norm=False,
             area_reg=0.1):  # regularization for bottom of Dice coeff
    """Dice metric/loss configuration.

    input_type is 'prob' or 'max_label'; dice_type is 'hard' or 'soft'.

    Note on approx_hard_max: hard Dice takes the most likely label per voxel
    and one-hot encodes it. argmax() works for a *metric*, but it is not
    differentiable, so it cannot be used in a *loss*; when approx_hard_max
    is True the prob->one_hot step is approximated differentiably instead.
    """
    self.nb_labels = nb_labels
    self.weights = K.variable(weights) if weights is not None else None
    self.vox_weights = K.variable(vox_weights) if vox_weights is not None else None
    self.input_type = input_type
    self.dice_type = dice_type
    self.approx_hard_max = approx_hard_max
    self.area_reg = area_reg
    self.crop_indices = crop_indices
    self.re_norm = re_norm

    # Restrict voxel weights to the cropped region when both are given.
    if self.crop_indices is not None and vox_weights is not None:
        self.vox_weights = utils.batch_gather(self.vox_weights, self.crop_indices)
示例9: loss
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def loss(self, y_true, y_pred):
    """Return the weighted sum of all configured loss functions."""
    total = K.variable(0)
    for index, loss_fn in enumerate(self.losses):
        total += self.loss_weights[index] * loss_fn(y_true, y_pred)
    return total
示例10: __init__
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def __init__(self, cap=100, **kwargs):
    """Initialize the layer with a cap on the effective sample count."""
    # Stored as a float32 backend variable so it can be used in graph ops.
    self.cap = K.variable(cap, dtype='float32')
    super(MeanStream, self).__init__(**kwargs)
示例11: output_init
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def output_init(shape, name=None, dim_ordering=None):
    """Initialize output weights: random values with a ones block appended
    along axis 2."""
    rand_size = (shape[0], shape[1], shape[2] - shape[3], shape[3])
    random_part = np.random.random(rand_size)
    # A ones block is used here rather than an identity (np.eye) block.
    ones_block = np.ones((shape[3], shape[3]))[np.newaxis, np.newaxis, ...]
    value = np.concatenate((random_part, ones_block), axis=2)
    return K.variable(value, name=name)
示例12: softmax_ratio
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def softmax_ratio(y_true, y_pred):
    """Triplet loss pushing softmax([d(a,p), d(a,n)]) toward the ideal [0, 1]."""
    anchor, positive, negative = tf.unstack(y_pred)
    distances = K.concatenate([_euclidean_distance(anchor, positive),
                               _euclidean_distance(anchor, negative)])
    softmax_scores = K.softmax(distances)
    target = K.variable([0, 1])
    # Penalize only where the softmax exceeds the ideal distribution.
    return K.mean(K.maximum(softmax_scores - target, 0))
示例13: softmax_ratio_pn
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def softmax_ratio_pn(y_true, y_pred):
    """Like softmax_ratio, but the negative term is the minimum of the
    anchor-negative and positive-negative distances."""
    anchor, positive, negative = tf.unstack(y_pred)
    ap_dist = _euclidean_distance(anchor, positive)
    an_dist = _euclidean_distance(anchor, negative)
    pn_dist = _euclidean_distance(positive, negative)
    # Hardest negative distance per sample.
    nearest_neg = K.min(K.concatenate([an_dist, pn_dist]), axis=-1, keepdims=True)
    softmax_scores = K.softmax(K.concatenate([ap_dist, nearest_neg]))
    target = K.variable([0, 1])
    return K.mean(K.maximum(softmax_scores - target, 0))
示例14: get_time
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def get_time(self):
    """Get simulation time variable.

    Returns
    -------
    time: float
        Current simulation time.
    """
    current_time = k.get_value(self.time)
    return current_time
示例15: set_time
# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import variable [as 別名]
def set_time(self, time):
    """Set simulation time variable.

    Parameters
    ----------
    time: float
        New value for the simulation time.
    """
    k.set_value(self.time, time)