當前位置: 首頁>>代碼示例>>Python>>正文


Python backend.cast方法代碼示例

本文整理匯總了Python中tensorflow.keras.backend.cast方法的典型用法代碼示例。如果您正苦於以下問題:Python backend.cast方法的具體用法?Python backend.cast怎麽用?Python backend.cast使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在tensorflow.keras.backend的用法示例。


在下文中一共展示了backend.cast方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: call

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def call(self, x, mask=None):
        """Apply spiking max-pooling to ``x`` and return the post-synaptic potential.

        Skip integration of input spikes in membrane potential. Directly
        transmit new spikes. The output psp is nonzero wherever there has
        been an input spike at any time during simulation.

        Parameters:
            x: input tensor to the pooling layer.
            mask: unused; kept for Keras ``Layer.call`` compatibility.
        """
        # Plain (non-spiking) max pooling produces the candidate psp.
        input_psp = MaxPooling2D.call(self, x)

        if self.spiketrain is not None:
            # XOR of (pooled input nonzero) and (already spiked before) --
            # presumably flags neurons whose spike state just changed;
            # TODO(review): confirm intended semantics against get_psp.
            new_spikes = tf.math.logical_xor(
                k.greater(input_psp, 0), k.greater(self.last_spiketimes, 0))
            # Stamp the current simulation time onto the new spikes.
            self.add_update([(self.spiketrain,
                              self.time * k.cast(new_spikes, k.floatx()))])

        psp = self.get_psp(input_psp)

        return k.cast(psp, k.floatx()) 
開發者ID:NeuromorphicProcessorProject,項目名稱:snn_toolbox,代碼行數:19,代碼來源:ttfs.py

示例2: tversky_loss

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def tversky_loss(y_true, y_pred, smooth=0.000001):
    """Tversky loss over all classes.

    With alpha = beta = 0.5 the Tversky index reduces to the Dice
    coefficient. Returns ``n_classes - sum(per-class Tversky)`` so that a
    perfect prediction yields 0.
    """
    # Weights for false negatives (alpha) and false positives (beta).
    alpha, beta = 0.5, 0.5
    axis = identify_axis(y_true.get_shape())
    # Per-class confusion terms, reduced over the spatial axes.
    true_pos = K.sum(y_true * y_pred, axis=axis)
    false_neg = K.sum(y_true * (1 - y_pred), axis=axis)
    false_pos = K.sum((1 - y_true) * y_pred, axis=axis)
    numerator = true_pos + smooth
    denominator = true_pos + alpha * false_neg + beta * false_pos + smooth
    per_class_tversky = numerator / denominator
    # Collapse the class axis into a single score.
    total_tversky = K.sum(per_class_tversky, axis=[-1])
    num_classes = K.cast(K.shape(y_true)[-1], 'float32')
    return num_classes - total_tversky

#-----------------------------------------------------#
#             Tversky & Crossentropy loss             #
#-----------------------------------------------------# 
開發者ID:frankkramer-lab,項目名稱:MIScnn,代碼行數:22,代碼來源:metrics.py

示例3: cat_acc

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def cat_acc(y_true, y_pred):
    """Keras loss function for sparse_categorical_accuracy.

    Workaround for sparse_categorical_accuracy being broken in keras 2.2.4
    (https://github.com/keras-team/keras/issues/11348#issuecomment-439969957);
    adapted from keras commit e59570ae.

    :param y_true: tensor of true class labels.
    :param y_pred: class output scores from network.

    :returns: categorical accuracy.
    """
    from tensorflow.keras import backend as K

    # Labels may arrive as (num_samples, 1); drop the trailing axis so
    # they are comparable to the argmax result.
    if K.ndim(y_true) == K.ndim(y_pred):
        y_true = K.squeeze(y_true, -1)
    # Dense prediction scores -> label indices, cast for the comparison.
    predicted_labels = K.cast(K.argmax(y_pred, axis=-1), K.floatx())
    return K.cast(K.equal(y_true, predicted_labels), K.floatx())
開發者ID:nanoporetech,項目名稱:medaka,代碼行數:21,代碼來源:training.py

示例4: _find_maxima

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def _find_maxima(x, coordinate_scale=1, confidence_scale=255.0):
    """Locate 2D argmax positions and confidences in ``x``.

    Concatenates, along the second-to-last axis: column indices and row
    indices of the maxima (each multiplied by ``coordinate_scale``) and the
    maximum values (divided by ``confidence_scale``).
    """
    x = K.cast(x, K.floatx())

    # Collapse each spatial axis so the 2D argmax becomes two 1D argmaxes.
    column_maxima = K.max(x, axis=1)
    row_maxima = K.max(x, axis=2)

    confidence = K.expand_dims(K.max(column_maxima, 1), -2) / confidence_scale

    col_idx = K.expand_dims(K.cast(K.argmax(column_maxima, -2), K.floatx()), -2)
    row_idx = K.expand_dims(K.cast(K.argmax(row_maxima, -2), K.floatx()), -2)

    return K.concatenate([col_idx * coordinate_scale,
                          row_idx * coordinate_scale,
                          confidence], -2)
開發者ID:jgraving,項目名稱:DeepPoseKit,代碼行數:20,代碼來源:backend.py

示例5: _preprocess_conv2d_input

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def _preprocess_conv2d_input(x, data_format):
    """Transpose and cast the input before the conv2d.

    # Arguments
        x: input tensor.
        data_format: string, `"channels_last"` or `"channels_first"`.
    # Returns
        A tensor.
    """
    # Downcast float64 first; the conv kernels expect float32.
    if dtype(x) == "float64":
        x = tf.cast(x, "float32")
    # channels_last input is already in TF's native layout.
    if data_format != "channels_first":
        return x
    # TF uses the last dimension as channel dimension, instead of the
    # 2nd one: TH shape (samples, input_depth, rows, cols) becomes
    # TF shape (samples, rows, cols, input_depth).
    return tf.transpose(x, (0, 2, 3, 1))
開發者ID:jgraving,項目名稱:DeepPoseKit,代碼行數:19,代碼來源:backend.py

示例6: recall

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def recall(y_true, y_pred):
    """Recall for foreground pixels.

    Calculates pixelwise recall TP/(TP + FN).

    """
    # Binarize both maps: clip into (epsilon, 1] then round, so values
    # above ~0.5 count as positive pixels.
    truth = K.round(K.clip(y_true, K.epsilon(), 1))
    pred_pos = K.round(K.clip(y_pred, K.epsilon(), 1))
    # True positives: pixels positive in both truth and prediction
    # (stack + all acts as an elementwise logical AND).
    true_pos = K.sum(K.cast(K.all(K.stack([truth, pred_pos], axis=2), axis=2),
                            dtype='float64'))
    # Total foreground pixels in the ground truth (TP + FN).
    truth_ct = K.sum(K.round(K.clip(y_true, K.epsilon(), 1)))
    # NOTE(review): ``truth_ct`` is a tensor, so ``== 0`` compares a tensor
    # to a Python int and likely never triggers in graph mode -- confirm
    # the intended behavior when there are no foreground pixels.
    if truth_ct == 0:
        return 0
    recall = true_pos/truth_ct

    return recall 
開發者ID:CosmiQ,項目名稱:solaris,代碼行數:19,代碼來源:metrics.py

示例7: loss

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def loss(self, y_true, y_pred):
        """Categorical crossentropy loss.

        Optionally crops both tensors to ``self.crop_indices``, computes in
        float16 when ``self.use_float16`` is set, and applies the optional
        class weights (``self.weights``) and voxel weights
        (``self.vox_weights``) before reducing to a scalar mean.
        """

        if self.crop_indices is not None:
            y_true = utils.batch_gather(y_true, self.crop_indices)
            y_pred = utils.batch_gather(y_pred, self.crop_indices)

        if self.use_float16:
            y_true = K.cast(y_true, 'float16')
            y_pred = K.cast(y_pred, 'float16')

        # scale and clip probabilities
        # this should not be necessary for softmax output.
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1)

        # compute log probability
        log_post = K.log(y_pred)  # likelihood

        # loss
        loss = - y_true * log_post

        # weighted loss
        if self.weights is not None:
            loss *= self.weights

        if self.vox_weights is not None:
            loss *= self.vox_weights

        # take the total loss
        # loss = K.batch_flatten(loss)
        # Cast back to float32 before the reduction -- presumably to avoid
        # float16 overflow/precision loss in the sum; confirm.
        mloss = K.mean(K.sum(K.cast(loss, 'float32'), -1))
        # NOTE(review): tf.verify_tensor_all_finite is removed in TF2
        # (tf.debugging.assert_all_finite) -- confirm target TF version.
        tf.verify_tensor_all_finite(mloss, 'Loss not finite')
        return mloss
開發者ID:adalca,項目名稱:neuron,代碼行數:36,代碼來源:metrics.py

示例8: loss_function

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def loss_function(self, inputs, outputs):
        """Accumulate per-task losses and metrics, scaled by batch size.

        Iterates over ``self._tasks`` in lockstep with ``inputs`` and
        ``outputs``, sums the task losses, merges the task metric dicts,
        and multiplies loss and every metric by the batch size (also
        reported under the ``'batch_size'`` metric key).
        """
        total_loss = 0
        total_metrics = {}

        # A single task passes a bare input dict; wrap it so the zip
        # below works uniformly.
        if len(self._tasks) == 1:
            inputs = (inputs,)

        # Batch size comes from the 'primary' entry; paired inputs nest it
        # under 'first', hence the EAFP fallback.
        try:
            batch_size = K.cast(tf.shape(inputs[0]['primary'])[0], K.floatx())
        except KeyError:
            batch_size = K.cast(tf.shape(inputs[0]['first']['primary'])[0], K.floatx())

        for input_, output, task in zip(inputs, outputs, self._tasks):
            loss, metrics = task.loss_function(input_, output)
            # Fail fast on NaN/Inf in any task's loss or metrics.
            loss = tf.check_numerics(loss, 'loss from {}'.format(task.__class__.__name__))
            for name, value in metrics.items():
                metrics[name] = tf.check_numerics(value, 'metric {}'.format(name))
            total_loss += loss
            total_metrics.update(metrics)

        # NOTE(review): scaling by batch size presumably undoes an upstream
        # per-batch mean -- confirm against the training loop.
        total_loss *= batch_size
        for key in total_metrics:
            total_metrics[key] *= batch_size
        total_metrics['batch_size'] = batch_size

        return total_loss, total_metrics 
開發者ID:songlab-cal,項目名稱:tape-neurips2019,代碼行數:28,代碼來源:experiments.py

示例9: piecewise_linear

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def piecewise_linear(t, schedule):
    """Piecewise linear function of ``t``.

    ``schedule`` is a dict such as {1000: 1, 2000: 0.1}, meaning that for
    t in [0, 1000] the output increases uniformly from 0 to 1, for
    t in [1000, 2000] it decreases uniformly from 1 to 0.1, and for
    t > 2000 it stays constant at 0.1.
    """
    # Sort breakpoints by time and ensure the schedule starts at (0, 0.0).
    schedule = sorted(schedule.items())
    if schedule[0][0] != 0:
        schedule = [(0, 0.0)] + schedule

    x = K.constant(schedule[0][1], dtype=K.floatx())
    t = K.cast(t, K.floatx())
    for i in range(len(schedule)):
        t_begin = schedule[i][0]
        x_begin = x
        if i != len(schedule) - 1:
            # Linear interpolation toward the next breakpoint.
            dx = schedule[i + 1][1] - schedule[i][1]
            dt = schedule[i + 1][0] - schedule[i][0]
            slope = 1.0 * dx / dt
            x = schedule[i][1] + slope * (t - t_begin)
        else:
            # Beyond the last breakpoint the output is held constant.
            x = K.constant(schedule[i][1], dtype=K.floatx())
        # Keep the previous segment's value until t reaches this segment.
        x = K.switch(t >= t_begin, x, x_begin)

    return x 
開發者ID:bojone,項目名稱:bert4keras,代碼行數:28,代碼來源:backend.py

示例10: adjusted_score

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def adjusted_score(hyper_model, delta, metric_function=None):
  """Build a Keras metric whose value is scaled by ``(1 + delta)``.

  ``metric_function`` may be a callable, the string "accuracy"/"acc"
  (dispatched to binary / sparse-categorical / categorical accuracy based
  on the tensor shapes), any other string (treated as categorical
  accuracy), or None (defaults to "accuracy").
  """
  if not metric_function:
    metric_function = "accuracy"

  def score(y_true, y_pred):
    true_shape = y_true.shape.as_list()
    pred_shape = y_pred.shape.as_list()

    # A single output unit means binary classification; sparse labels are
    # detected by a lower rank or a trailing singleton label dimension.
    is_binary = pred_shape[-1] == 1
    is_sparse_categorical = (
        len(true_shape) < len(pred_shape) or
        true_shape[-1] == 1 and pred_shape[-1] > 1)

    if not isinstance(metric_function, six.string_types):
      metric = metric_function(y_true, y_pred)
    elif metric_function in ("accuracy", "acc"):
      if is_binary:
        metric = binary_accuracy(y_true, y_pred)
      elif is_sparse_categorical:
        metric = sparse_categorical_accuracy(y_true, y_pred)
      else:
        metric = categorical_accuracy(y_true, y_pred)
    else:
      metric = categorical_accuracy(y_true, y_pred)

    return K.cast(metric * (1.0 + delta), K.floatx())

  return score
開發者ID:google,項目名稱:qkeras,代碼行數:32,代碼來源:autoqkeras_internal.py

示例11: trial_size_metric

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def trial_size_metric(trial_size):
    """Build a constant Keras metric that reports the trial size.

    The returned function (kept named ``trial`` -- Keras displays metrics
    by ``__name__``) ignores its arguments and always evaluates to
    ``trial_size`` cast to the Keras float type.
    """
    def trial(y_true, y_pred):  # pylint: disable=unused-argument
        return K.cast(trial_size, K.floatx())

    return trial
開發者ID:google,項目名稱:qkeras,代碼行數:6,代碼來源:autoqkeras_internal.py

示例12: stochastic_round_po2

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def stochastic_round_po2(x):
  """Performs stochastic rounding for the power of two.

  Brackets ``|x|`` between two consecutive integer exponents and samples
  one of them, with a threshold chosen so that the expected sampled
  magnitude equals ``|x|``. Returns the chosen log2 exponent tensor.
  """
  # TODO(hzhuang): test stochastic_round_po2 and constraint.
  # because quantizer is applied after constraint.
  y = tf.abs(x)
  eps = tf.keras.backend.epsilon()
  log2 = tf.keras.backend.log(2.0)

  # Nearest integer exponent of |x|; eps guards against log(0).
  x_log2 = tf.round(tf.keras.backend.log(y + eps) / log2)
  po2 = tf.cast(pow(2.0, tf.cast(x_log2, dtype="float32")), dtype="float32")
  # Pick the bracketing exponents so that 2**left_val <= y <= 2**right_val.
  left_val = tf.where(po2 > y, x_log2 - 1, x_log2)
  right_val = tf.where(po2 > y, x_log2, x_log2 + 1)
  # sampling in [2**left_val, 2**right_val].
  minval = 2 ** left_val
  maxval = 2 ** right_val
  val = tf.random.uniform(tf.shape(y), minval=minval, maxval=maxval)
  # use y as a threshold to keep the probability [2**left_val, y, 2**right_val]
  # so that the mean value of the sample should be y
  x_po2 = tf.where(y < val, left_val, right_val)
  # An alternative sign-preserving variant was previously kept here as a
  # bare string literal (dead code, evaluated and discarded every call):
  #   x_log2 = stochastic_round(tf.keras.backend.log(y + eps) / log2)
  #   po2 = tf.sign(x) * tf.cast(pow(2.0, tf.cast(x_log2, "float32")),
  #                              "float32")
  return x_po2
開發者ID:google,項目名稱:qkeras,代碼行數:30,代碼來源:quantizers.py

示例13: call

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def call(self, inputs, **kwargs):
        """Return, as a float tensor, the argmin of ``inputs`` along axis 1."""
        min_indices = K.argmin(inputs, axis=1)
        return K.cast(min_indices, dtype=K.floatx())
開發者ID:tslearn-team,項目名稱:tslearn,代碼行數:4,代碼來源:shapelets.py

示例14: update_neurons

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def update_neurons(self):
        """Update neurons according to activation function.

        Integrates input into the membrane potential, generates output
        spikes, resets the membrane where neurons spiked, updates the
        refractory state and the recorded spiketrain, and returns the
        resulting post-synaptic potential as a float tensor.
        """

        # Update membrane potentials.
        new_mem = self.get_new_mem()

        # Generate spikes.
        if hasattr(self, 'activation_str') \
                and self.activation_str == 'softmax':
            output_spikes = self.softmax_activation(new_mem)
        else:
            output_spikes = self.linear_activation(new_mem)

        # Reset membrane potential after spikes.
        self.set_reset_mem(new_mem, output_spikes)

        # Store refractory period after spikes.
        if hasattr(self, 'activation_str') \
                and self.activation_str == 'softmax':
            # We do not constrain softmax output neurons.
            new_refrac = tf.identity(self.refrac_until)
        else:
            # Neurons that spiked become refractory until
            # time + tau_refrac; the rest keep their previous deadline.
            new_refrac = tf.where(k.not_equal(output_spikes, 0),
                                  k.ones_like(output_spikes) *
                                  (self.time + self.tau_refrac),
                                  self.refrac_until)
        self.add_update([(self.refrac_until, new_refrac)])

        if self.spiketrain is not None:
            # Record the current simulation time wherever a spike occurred.
            self.add_update([(self.spiketrain, self.time * k.cast(
                k.not_equal(output_spikes, 0), k.floatx()))])

        # Compute post-synaptic potential.
        psp = self.get_psp(output_spikes)

        return k.cast(psp, k.floatx()) 
開發者ID:NeuromorphicProcessorProject,項目名稱:snn_toolbox,代碼行數:38,代碼來源:ttfs.py

示例15: linear_activation

# 需要導入模塊: from tensorflow.keras import backend [as 別名]
# 或者: from tensorflow.keras.backend import cast [as 別名]
def linear_activation(self, mem):
        """Binary spikes: 1.0 where the membrane potential reached threshold.

        Returns a float tensor that is 1.0 wherever ``mem`` is at least
        ``self.v_thresh`` and 0.0 elsewhere.
        """
        spiked = k.greater_equal(mem, self.v_thresh)
        return k.cast(spiked, k.floatx())
開發者ID:NeuromorphicProcessorProject,項目名稱:snn_toolbox,代碼行數:5,代碼來源:ttfs.py


注:本文中的tensorflow.keras.backend.cast方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。