

Python backend.stack Method Code Examples

This article collects typical usage examples of the tensorflow.keras.backend.stack method in Python. If you are wondering how to use backend.stack, or looking for concrete examples of it in real code, the curated examples below may help. You can also browse further usage examples from the containing module, tensorflow.keras.backend.


The following 15 code examples of the backend.stack method are shown, sorted by popularity by default.
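
Before the examples, a quick illustration of the semantics: backend.stack joins a list of rank-R tensors into a single rank-(R+1) tensor along a new axis. A minimal sketch, with values chosen purely for illustration:

from tensorflow.keras import backend as K

a = K.constant([1.0, 2.0])
b = K.constant([3.0, 4.0])

stacked = K.stack([a, b], axis=0)   # new leading axis: row 0 is a, row 1 is b
print(K.int_shape(stacked))         # (2, 2)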

Example 1: split_heads_2d

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def split_heads_2d(self, ip):
        tensor_shape = K.shape(ip)

        # batch, height, width, channels for axis = -1
        tensor_shape = [tensor_shape[i] for i in range(len(self._shape))]

        batch = tensor_shape[0]
        height = tensor_shape[1]
        width = tensor_shape[2]
        channels = tensor_shape[3]

        # Save the spatial tensor dimensions
        self._batch = batch
        self._height = height
        self._width = width

        ret_shape = K.stack([batch, height, width, self.num_heads, channels // self.num_heads])
        split = K.reshape(ip, ret_shape)
        transpose_axes = (0, 3, 1, 2, 4)
        split = K.permute_dimensions(split, transpose_axes)
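        # resulting shape: [batch, num_heads, height, width, channels // num_heads]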

        return split 
Developer: titu1994, Project: keras-attention-augmented-convs, Lines: 24, Source: attn_augconv.py

Example 2: build

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def build(self, input_shape):
        assert len(input_shape) >= 2
        F = input_shape[0][-1]

        # Create weights for parallel stacks
        # self.kernels[k][i] refers to the k-th stack, i-th iteration
        self.kernels = []
        for k in range(self.order):
            kernel_stack = []
            current_shape = F
            for i in range(self.iterations):
                kernel_stack.append(
                    self.create_weights(current_shape, F, self.channels,
                                        'ARMA_GCS_{}{}'.format(k, i))
                )
                current_shape = self.channels
                if self.share_weights and i == 1:
                    # No need to continue because all following weights will be shared
                    break
            self.kernels.append(kernel_stack)
        self.built = True 
Developer: danielegrattarola, Project: spektral, Lines: 23, Source: arma_conv.py

Example 3: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def call(self, inputs):
        features = inputs[0]
        fltr = inputs[1]

        # Convolution
        output = []  # Stores the parallel filters
        for k in range(self.order):
            output_k = features
            for i in range(self.iterations):
                output_k = self.gcs([output_k, features, fltr], k, i)
            output.append(output_k)

        # Average stacks
        output = K.stack(output, axis=-1)
        output = K.mean(output, axis=-1)
        output = self.activation(output)

        return output 
Developer: danielegrattarola, Project: spektral, Lines: 20, Source: arma_conv.py

Example 4: triplet_network

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def triplet_network(base_network, embedding_dims=2, embedding_l2=0.0):
    def output_shape(shapes):
        shape1, shape2, shape3 = shapes
        return (3, shape1[0],)

    input_a = Input(shape=base_network.input_shape[1:])
    input_p = Input(shape=base_network.input_shape[1:])
    input_n = Input(shape=base_network.input_shape[1:])

    embeddings = Dense(embedding_dims,
                       kernel_regularizer=l2(embedding_l2))(base_network.output)
    network = Model(base_network.input, embeddings)

    processed_a = network(input_a)
    processed_p = network(input_p)
    processed_n = network(input_n)

    triplet = Lambda(K.stack,
                     output_shape=output_shape,
                     name='stacked_triplets')([processed_a,
                                               processed_p,
                                               processed_n],)
    model = Model([input_a, input_p, input_n], triplet)

    return model, processed_a, processed_p, processed_n 
Developer: beringresearch, Project: ivis, Lines: 27, Source: network.py
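
A minimal usage sketch for triplet_network; the two-layer base network below is a hypothetical stand-in, not part of the original source:

from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.models import Model

inp = Input(shape=(32,))
base_network = Model(inp, Dense(64, activation='relu')(inp))

# the model consumes [anchor, positive, negative] and emits the stacked triplet
model, anchor, positive, negative = triplet_network(base_network, embedding_dims=2)
model.summary()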

Example 5: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def call(self, inputs):
        def brelu(x):
            # get shape of X, we are interested in the last axis, which is constant
            shape = K.int_shape(x)
            # last axis
            dim = shape[-1]
            # half of the last axis (+1 if necessary)
            dim2 = dim // 2
            if dim % 2 != 0:
                dim2 += 1
            # multiplier will be a tensor of alternated +1 and -1
            multiplier = K.ones((dim2,))
            multiplier = K.stack([multiplier, -multiplier], axis=-1)
            # flatten the (dim2, 2) stack into an alternating [+1, -1, ...] vector
            # before trimming, so odd dims drop exactly one element (slicing the
            # stacked tensor directly would drop a whole +1/-1 pair)
            multiplier = K.reshape(multiplier, (-1,))
            if dim % 2 != 0:
                multiplier = multiplier[:-1]
            # adjust multiplier shape to the shape of x
            multiplier = K.reshape(multiplier, tuple(1 for _ in shape[:-1]) + (-1,))
            return multiplier * tf.nn.relu(multiplier * x)

        return Lambda(brelu)(inputs) 
Developer: digantamisra98, Project: Echo, Lines: 22, Source: custom_activation.py

Example 6: rel_to_abs

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def rel_to_abs(self, x):
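        # "Skew" trick: converts relative-position logits of shape
        # [B, Nh, L, 2L-1] into absolute logits of shape [B, Nh, L, L].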
        shape = K.shape(x)
        shape = [shape[i] for i in range(3)]
        B, Nh, L = shape
        col_pad = K.zeros(K.stack([B, Nh, L, 1]))
        x = K.concatenate([x, col_pad], axis=3)
        flat_x = K.reshape(x, [B, Nh, L * 2 * L])
        flat_pad = K.zeros(K.stack([B, Nh, L - 1]))
        flat_x_padded = K.concatenate([flat_x, flat_pad], axis=2)
        final_x = K.reshape(flat_x_padded, [B, Nh, L + 1, 2 * L - 1])
        final_x = final_x[:, :, :L, L - 1:]
        return final_x 
Developer: titu1994, Project: keras-attention-augmented-convs, Lines: 14, Source: attn_augconv.py

Example 7: combine_heads_2d

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def combine_heads_2d(self, inputs):
        # [batch, num_heads, height, width, depth_v // num_heads]
        transposed = K.permute_dimensions(inputs, [0, 2, 3, 1, 4])
        # [batch, height, width, num_heads, depth_v // num_heads]
        shape = K.shape(transposed)
        shape = [shape[i] for i in range(5)]

        a, b = shape[-2:]
        ret_shape = K.stack(shape[:-2] + [a * b])
        # [batch, height, width, depth_v]
        return K.reshape(transposed, ret_shape) 
Developer: titu1994, Project: keras-attention-augmented-convs, Lines: 13, Source: attn_augconv.py

Example 8: gcs

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def gcs(self, inputs, stack, iteration):
        """
        Creates a graph convolutional layer with a skip connection.
        :param inputs: list of input Tensors, namely
            - input node features
            - input node features for the skip connection
            - normalized adjacency matrix;
        :param stack: int, current stack (used to retrieve kernels);
        :param iteration: int, current iteration (used to retrieve kernels);
        :return: output node features.
        """
        X = inputs[0]
        X_skip = inputs[1]
        fltr = inputs[2]

        if self.share_weights and iteration >= 1:
            iter = 1
        else:
            iter = iteration
        kernel_1, kernel_2, bias = self.kernels[stack][iter]

        # Convolution
        output = K.dot(X, kernel_1)
        output = ops.filter_dot(fltr, output)

        # Skip connection
        skip = K.dot(X_skip, kernel_2)
        skip = Dropout(self.dropout_rate)(skip)
        output += skip

        if self.use_bias:
            output = K.bias_add(output, bias)
        output = self.gcn_activation(output)
        return output 
Developer: danielegrattarola, Project: spektral, Lines: 36, Source: arma_conv.py

Example 9: batch_gather

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def batch_gather(reference, indices):
    """
    C+P From Keras pull request https://github.com/keras-team/keras/pull/6377/files
    
    Batchwise gathering of row indices.

    The numpy equivalent is `reference[np.arange(batch_size), indices]`, where
    `batch_size` is the first dimension of the reference tensor.

    # Arguments
        reference: A tensor with ndim >= 2 of shape
          (batch_size, dim1, dim2, ..., dimN).
        indices: A 1d integer tensor of shape (batch_size) satisfying
          0 <= i < dim1 for each element i.

    # Returns
        The selected tensor with shape (batch_size, dim2, ..., dimN).

    # Examples
        1. If reference is `[[3, 5, 7], [11, 13, 17]]` and indices is `[2, 1]`
        then the result is `[7, 13]`.

        2. If reference is
        ```
          [[[2, 3], [4, 5], [6, 7]],
           [[10, 11], [12, 13], [16, 17]]]
        ```
        and indices is `[2, 1]` then the result is `[[6, 7], [12, 13]]`.
    """
    batch_size = K.shape(reference)[0]
    indices = tf.stack([tf.range(batch_size), indices], axis=1)
    return tf.gather_nd(reference, indices) 
Developer: adalca, Project: neuron, Lines: 34, Source: utils.py
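
A quick eager-mode check of the first documented example, assuming TensorFlow 2.x:

import tensorflow as tf

reference = tf.constant([[3, 5, 7], [11, 13, 17]])
indices = tf.constant([2, 1])
print(batch_gather(reference, indices))  # tf.Tensor([ 7 13], shape=(2,), dtype=int32)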

Example 10: _ctdet_decode

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def _ctdet_decode(hm, reg, wh, k=100, output_stride=4):
  hm = K.sigmoid(hm)
  hm = _nms(hm)
  hm_shape = K.shape(hm)
  reg_shape = K.shape(reg)
  wh_shape = K.shape(wh)
  batch, width, cat = hm_shape[0], hm_shape[2], hm_shape[3]

  hm_flat = K.reshape(hm, (batch, -1))
  reg_flat = K.reshape(reg, (reg_shape[0], -1, reg_shape[-1]))
  wh_flat = K.reshape(wh, (wh_shape[0], -1, wh_shape[-1]))

  def _process_sample(args):
    _hm, _reg, _wh = args
    _scores, _inds = tf.math.top_k(_hm, k=k, sorted=True)
    _classes = K.cast(_inds % cat, 'float32')
    _inds = K.cast(_inds / cat, 'int32')
    _xs = K.cast(_inds % width, 'float32')
    _ys = K.cast(K.cast(_inds / width, 'int32'), 'float32')
    _wh = K.gather(_wh, _inds)
    _reg = K.gather(_reg, _inds)

    _xs = _xs + _reg[..., 0]
    _ys = _ys + _reg[..., 1]

    _x1 = _xs - _wh[..., 0] / 2
    _y1 = _ys - _wh[..., 1] / 2
    _x2 = _xs + _wh[..., 0] / 2
    _y2 = _ys + _wh[..., 1] / 2

    # rescale to image coordinates
    _x1 = output_stride * _x1
    _y1 = output_stride * _y1
    _x2 = output_stride * _x2
    _y2 = output_stride * _y2

    _detection = K.stack([_x1, _y1, _x2, _y2, _scores, _classes], -1)
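    # per-sample output: [k, 6] = (x1, y1, x2, y2, score, class)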
    return _detection

  detections = K.map_fn(_process_sample, [hm_flat, reg_flat, wh_flat], dtype=K.floatx())
  return detections 
Developer: 1044197988, Project: Centernet-Tensorflow2.0, Lines: 43, Source: decode.py

Example 11: yolo2_eval

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def yolo2_eval(yolo_outputs,
               image_shape,
               max_boxes=10,
               score_threshold=.6,
               iou_threshold=.5):
    """Evaluate YOLOv2 model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo2_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo2_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.constant(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes 
Developer: david8862, Project: keras-YOLOv3-model-set, Lines: 29, Source: postprocess.py

Example 12: keras_composite_loss

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def keras_composite_loss(loss_dict, weight_dict, custom_losses=None):
    """Wrapper to other loss functions to create keras-compatible composite."""

    def composite(y_true, y_pred):
        loss = K.sum(K.flatten(K.stack([weight_dict[loss_name]*get_single_loss(
                'keras', loss_name, loss_params, custom_losses)(y_true, y_pred)
                for loss_name, loss_params in loss_dict.items()], axis=-1)))
        return loss

    return composite 
Developer: CosmiQ, Project: solaris, Lines: 12, Source: losses.py
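
A hypothetical usage sketch; the loss names below are placeholders, and which names are valid depends on solaris's get_single_loss registry:

composite = keras_composite_loss(
    loss_dict={'bcewithlogits': {}, 'jaccard': {}},
    weight_dict={'bcewithlogits': 2.0, 'jaccard': 1.0})
model.compile(optimizer='adam', loss=composite)  # model defined elsewhere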

Example 13: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def call(self, inputs, **kwargs):
        if self.axis == 1:
            # If channels first, force it to be channels last for these ops
            inputs = K.permute_dimensions(inputs, [0, 2, 3, 1])

        q, k, v = tf.split(inputs, [self.depth_k, self.depth_k, self.depth_v], axis=-1)

        q = self.split_heads_2d(q)
        k = self.split_heads_2d(k)
        v = self.split_heads_2d(v)

        # scale query
        depth_k_heads = self.depth_k / self.num_heads
        q *= (depth_k_heads ** -0.5)

        # [Batch, num_heads, height * width, depth_k or depth_v] if axis == -1
        qk_shape = [self._batch, self.num_heads, self._height * self._width, self.depth_k // self.num_heads]
        v_shape = [self._batch, self.num_heads, self._height * self._width, self.depth_v // self.num_heads]
        flat_q = K.reshape(q, K.stack(qk_shape))
        flat_k = K.reshape(k, K.stack(qk_shape))
        flat_v = K.reshape(v, K.stack(v_shape))

        # [Batch, num_heads, HW, HW]
        logits = tf.matmul(flat_q, flat_k, transpose_b=True)

        # Apply relative encodings
        if self.relative:
            h_rel_logits, w_rel_logits = self.relative_logits(q)
            logits += h_rel_logits
            logits += w_rel_logits

        weights = K.softmax(logits, axis=-1)
        attn_out = tf.matmul(weights, flat_v)

        attn_out_shape = [self._batch, self.num_heads, self._height, self._width, self.depth_v // self.num_heads]
        attn_out_shape = K.stack(attn_out_shape)
        attn_out = K.reshape(attn_out, attn_out_shape)
        attn_out = self.combine_heads_2d(attn_out)
        # [batch, height, width, depth_v]

        if self.axis == 1:
            # return to [batch, depth_v, height, width] for channels first
            attn_out = K.permute_dimensions(attn_out, [0, 3, 1, 2])

        attn_out.set_shape(self.compute_output_shape(self._shape))

        return attn_out 
Developer: titu1994, Project: keras-attention-augmented-convs, Lines: 49, Source: attn_augconv.py

Example 14: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def call(self, inputs, **kwargs):
        input_shape = K.int_shape(inputs)
        tensor_input_shape = K.shape(inputs)

        # Prepare broadcasting shape.
        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
        broadcast_shape.insert(1, self.groups)

        reshape_group_shape = K.shape(inputs)
        group_axes = [reshape_group_shape[i] for i in range(len(input_shape))]
        group_axes[self.axis] = input_shape[self.axis] // self.groups
        group_axes.insert(1, self.groups)

        # reshape inputs to new group shape
        group_shape = [group_axes[0], self.groups] + group_axes[2:]
        group_shape = K.stack(group_shape)
        inputs = K.reshape(inputs, group_shape)

        group_reduction_axes = list(range(len(group_axes)))
        group_reduction_axes = group_reduction_axes[2:]

        mean = K.mean(inputs, axis=group_reduction_axes, keepdims=True)
        variance = K.var(inputs, axis=group_reduction_axes, keepdims=True)

        inputs = (inputs - mean) / (K.sqrt(variance + self.epsilon))

        # prepare broadcast shape
        inputs = K.reshape(inputs, group_shape)
        outputs = inputs

        # In this case we must explicitly broadcast all parameters.
        if self.scale:
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            outputs = outputs * broadcast_gamma

        if self.center:
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            outputs = outputs + broadcast_beta

        outputs = K.reshape(outputs, tensor_input_shape)

        return outputs 
Developer: sandialabs, Project: bcnn, Lines: 47, Source: groupnorm.py

Example 15: gaussian_kernel

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import stack [as alias]
def gaussian_kernel(sigma, windowsize=None, indexing='ij'):
    """
    sigma will be a number of a list of numbers.

    # some guidance from my MATLAB file 
    https://github.com/adalca/mivt/blob/master/src/gaussFilt.m

    Parameters:
        sigma: scalar or list of scalars
        windowsize (optional): scalar or list of scalars indicating the shape of the kernel
    
    Returns:
        ND kernel the same dimensiosn as the number of sigmas.

    Todo: could use MultivariateNormalDiag
    """

    if not isinstance(sigma, (list, tuple)):
        sigma = [sigma]
    sigma = [np.maximum(f, np.finfo(float).eps) for f in sigma]

    nb_dims = len(sigma)

    # compute windowsize
    if windowsize is None:
        windowsize = [np.round(f * 3) * 2 + 1 for f in sigma]

    if len(sigma) != len(windowsize):
        raise ValueError('sigma and windowsize should have the same length. '
                         'Got vectors: ' + str(sigma) + ' and ' + str(windowsize))

    # ok, let's get to work.
    mid = [(w - 1)/2 for w in windowsize]

    # list of volume ndgrid
    # N-long list, each entry of shape volshape
    mesh = volshape_to_meshgrid(windowsize, indexing=indexing)  
    mesh = [tf.cast(f, 'float32') for f in mesh]

    # compute independent gaussians
    diff = [mesh[f] - mid[f] for f in range(len(windowsize))]
    exp_term = [- K.square(diff[f])/(2 * (sigma[f]**2)) for f in range(nb_dims)]
    norms = [exp_term[f] - np.log(sigma[f] * np.sqrt(2 * np.pi)) for f in range(nb_dims)]

    # add an all-ones entry and transform into a large matrix
    norms_matrix = tf.stack(norms, axis=-1)  # *volshape x N
    g = K.sum(norms_matrix, -1)  # volshape
    g = tf.exp(g)
    g /= tf.reduce_sum(g)

    return g 
Developer: adalca, Project: neuron, Lines: 53, Source: utils.py
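
A usage sketch, assuming volshape_to_meshgrid from the same utils module is in scope (it is in the source file):

# default window per dimension is 2 * round(3 * sigma) + 1 = 7 for sigma = 1
g = gaussian_kernel([1.0, 1.0])
print(g.shape)  # (7, 7); values are normalized to sum to 1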


Note: The tensorflow.keras.backend.stack examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their authors, who retain copyright; follow each project's license when using or redistributing the code. Do not reproduce this article without permission.