

Python utils.softmax Method Code Examples

This article collects typical usage examples of the Python utils.softmax method from open-source code. If you have been wondering what exactly utils.softmax does, how to call it, or where it is used, the curated examples below should help. You can also explore further usage examples from the utils module it belongs to.


The following presents 8 code examples of the utils.softmax method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
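The exact implementation of utils.softmax differs from project to project, but the idea is always the same: exponentiate and normalize so that the outputs are non-negative and sum to 1 along an axis. For orientation, here is a minimal, numerically stable NumPy sketch; it illustrates the general pattern and is not the utils module of any particular project below.

import numpy as np

def softmax(x, axis=-1):
    '''Numerically stable softmax along the given axis.'''
    x = np.asarray(x, dtype=np.float64)
    # Subtracting the per-row max prevents exp() from overflowing;
    # the shift cancels in the normalization, so the result is unchanged.
    z = x - np.max(x, axis=axis, keepdims=True)
    e = np.exp(z)
    return e / np.sum(e, axis=axis, keepdims=True)

softmax([[1.0, 2.0, 3.0]], axis=1)  # -> [[0.090, 0.245, 0.665]]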

Example 1: test

# Required import: import utils [as alias]
# Or: from utils import softmax [as alias]
def test(self, inputs, label, timestep):
    '''Test model.
    :param timestep: split the input into segments of length timestep.
    '''
    for embed in self.embeds:
      embed.eval()

    input = Variable(inputs[0].cuda(), requires_grad=False)
    label = Variable(label.cuda(), requires_grad=False)
    length = input.size(1)

    # Split video into segments
    input, start_indices = utils.get_segments(input, timestep)
    inputs = [input]

    logits, _ = self._forward(inputs)
    logits = utils.to_numpy(logits).squeeze(0)
    all_logits = [[] for _ in range(length)]
    for i, s in enumerate(start_indices):
      for j in range(timestep):
        all_logits[s + j].append(logits[i][j])
    # Average logits for each time step.
    final_logits = np.zeros((length, self.n_classes + 1))
    for i in range(length):
      final_logits[i] = np.mean(all_logits[i], axis=0)
    logits = final_logits

    info_acc = self._get_acc([torch.Tensor(logits)], label)
    scores = utils.softmax(logits, axis=1)
    return OrderedDict(info_acc), logits, scores 
Developer ID: google, Project: graph_distillation, Lines of code: 33, Source file: model.py
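To make the overlap-averaging step concrete, here is a self-contained NumPy sketch of the same logic; the toy start_indices below stand in for whatever utils.get_segments actually returns, whose behavior is assumed here rather than documented.

import numpy as np

# Toy setup: a sequence of length 4 split into segments of timestep 3
# starting at indices 0 and 1, so frames 1-3 are covered by both segments.
length, timestep, n_out = 4, 3, 5
start_indices = [0, 1]
logits = np.random.randn(len(start_indices), timestep, n_out)

all_logits = [[] for _ in range(length)]
for i, s in enumerate(start_indices):
    for j in range(timestep):
        all_logits[s + j].append(logits[i, j])

# A frame's final logits are the mean over all segments covering it.
final_logits = np.stack([np.mean(frame, axis=0) for frame in all_logits])
assert final_logits.shape == (length, n_out)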

Example 2: __init__

# Required import: import utils [as alias]
# Or: from utils import softmax [as alias]
def __init__(self, modalities, n_classes, n_frames, n_channels, input_sizes,
               hidden_size, n_layers, dropout, hidden_size_seq, n_layers_seq,
               dropout_seq, bg_w, lr, lr_decay_rate, to_idx, ckpt_path,
               w_losses, w_modalities, metric, xfer_to, gd_size, gd_reg):
    super(GraphDistillation, self).__init__(
        modalities, n_classes, n_frames, n_channels, input_sizes,
        hidden_size, n_layers, dropout, hidden_size_seq, n_layers_seq,
        dropout_seq, bg_w, lr, lr_decay_rate, to_idx, ckpt_path)

    # Index of the modality to distill
    to_idx = self.modalities.index(xfer_to)
    from_idx = [x for x in range(len(self.modalities)) if x != to_idx]
    assert len(from_idx) >= 1

    # Prior: remove the modality being transferred to.
    w_modalities = [w_modalities[i] for i in from_idx]
    gd_prior = utils.softmax(w_modalities, 0.25)
    # Distillation model
    self.distillation_kernel = \
        get_distillation_kernel(n_classes + 1, hidden_size, gd_size, to_idx, from_idx,
                                gd_prior, gd_reg, w_losses, metric).cuda()

    # Add optimizer to self.optimizers
    gd_optimizer = optim.SGD(
        self.distillation_kernel.parameters(),
        lr=lr,
        momentum=0.9,
        weight_decay=5e-4)
    self.optimizers.append(gd_optimizer)
    self.lr_decay_rates.append(lr_decay_rate)

    self.xfer_to = xfer_to
    self.to_idx = to_idx
    self.from_idx = from_idx 
Developer ID: google, Project: graph_distillation, Lines of code: 37, Source file: model.py
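Note that utils.softmax is called here with a second positional argument, utils.softmax(w_modalities, 0.25), while the other examples pass axis=1. A plausible reading is that this variant takes a temperature: dividing the weights by a temperature below 1 sharpens the prior toward the strongest modalities. A hedged sketch of such a function follows; the actual signature in graph_distillation's utils may differ.

import numpy as np

def softmax_with_temperature(w, t=1.0):
    # Softmax over a 1-D array of weights, sharpened (t < 1)
    # or flattened (t > 1) by the temperature t.
    w = np.asarray(w, dtype=np.float64) / t
    e = np.exp(w - np.max(w))
    return e / e.sum()

softmax_with_temperature([1.0, 2.0], t=0.25)  # -> [0.018, 0.982]
softmax_with_temperature([1.0, 2.0], t=1.0)   # -> [0.269, 0.731]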

Example 3: test

# Required import: import utils [as alias]
# Or: from utils import softmax [as alias]
def test(opt, model, dataloader):
  '''Test model.'''
  # Logging
  logger = logging.Logger(opt.load_ckpt_path, opt.split)
  stats = logging.Statistics(opt.ckpt_path, opt.split)
  logger.log(opt)

  logits, labels = [], []
  model.load(opt.load_ckpt_paths, opt.load_epoch)
  for step, data in enumerate(dataloader, 1):
    inputs, label = data
    info_acc, logit = model.test(inputs, label)
    logits.append(utils.to_numpy(logit.squeeze(0)))
    labels.append(utils.to_numpy(label))
    update = stats.update(label.size(0), info_acc)
    if utils.is_due(step, opt.print_every):
      utils.info('step {}/{}: {}'.format(step, len(dataloader), update))

  logits = np.concatenate(logits, axis=0)
  length, n_classes = logits.shape
  labels = np.concatenate(labels)
  scores = utils.softmax(logits, axis=1)

  # Accuracy
  preds = np.argmax(scores, axis=1)
  acc = np.sum(preds == labels) / length
  # Average precision
  y_true = np.zeros((length, n_classes))
  y_true[np.arange(length), labels] = 1
  aps = average_precision_score(y_true, scores, average=None)
  aps = list(filter(lambda x: not np.isnan(x), aps))
  mAP = np.mean(aps)

  logger.log('[Summary]: {}'.format(stats.summarize()))
  logger.log('Acc: {}, mAP: {}'.format(acc, mAP)) 
Developer ID: google, Project: graph_distillation, Lines of code: 37, Source file: run.py
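The closing accuracy/mAP computation is worth seeing in isolation. A minimal sketch of the same pattern, one-hot encoding the labels and then computing per-class average precision with scikit-learn (the scores below are made up, purely for illustration):

import numpy as np
from sklearn.metrics import average_precision_score

scores = np.array([[0.7, 0.2, 0.1],
                   [0.1, 0.8, 0.1],
                   [0.3, 0.3, 0.4]])
labels = np.array([0, 1, 2])

preds = np.argmax(scores, axis=1)
acc = np.mean(preds == labels)

# One-hot ground truth, then per-class AP. average_precision_score
# returns NaN for classes with no positive samples, which is why the
# code above filters NaNs before averaging.
y_true = np.zeros_like(scores)
y_true[np.arange(len(labels)), labels] = 1
aps = average_precision_score(y_true, scores, average=None)
mAP = np.nanmean(aps)
print(acc, mAP)  # 1.0 and the mean per-class AP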

Example 4: __init__

# Required import: import utils [as alias]
# Or: from utils import softmax [as alias]
def __init__(self, modalities, n_classes, n_frames, n_channels, input_sizes,
               hidden_size, n_layers, dropout, lr, lr_decay_rate, ckpt_path,
               w_losses, w_modalities, metric, xfer_to, gd_size, gd_reg):
    super(GraphDistillation, self).__init__(
        modalities, n_classes, n_frames, n_channels, input_sizes,
        hidden_size, n_layers, dropout, lr, lr_decay_rate, ckpt_path)

    # Index of the modality to distill
    to_idx = self.modalities.index(xfer_to)
    from_idx = [x for x in range(len(self.modalities)) if x != to_idx]
    assert len(from_idx) >= 1

    # Prior: remove the modality being transferred to.
    w_modalities = [w_modalities[i] for i in from_idx]
    gd_prior = utils.softmax(w_modalities, 0.25)
    # Distillation model
    self.distillation_kernel = get_distillation_kernel(
        n_classes, hidden_size, gd_size, to_idx, from_idx, gd_prior, gd_reg,
        w_losses, metric).cuda()

    params = list(self.embeds[to_idx].parameters()) + \
             list(self.distillation_kernel.parameters())
    self.optimizer = optim.SGD(params, lr=lr, momentum=0.9, weight_decay=5e-4)

    self.xfer_to = xfer_to
    self.to_idx = to_idx
    self.from_idx = from_idx 
Developer ID: google, Project: graph_distillation, Lines of code: 30, Source file: model.py

Example 5: build_arch

# Required import: import utils [as alias]
# Or: from utils import softmax [as alias]
def build_arch(self):
        with tf.variable_scope('Conv1_layer'):
            # Conv1, return tensor with shape [batch_size, 20, 20, 256]
            conv1 = tf.contrib.layers.conv2d(self.X, num_outputs=256,
                                             kernel_size=9, stride=1,
                                             padding='VALID')

        # Primary Capsules layer, return tensor with shape [batch_size, 1152, 8, 1]
        with tf.variable_scope('PrimaryCaps_layer'):
            primaryCaps = CapsLayer(num_outputs=32, vec_len=8, with_routing=False, layer_type='CONV')
            caps1 = primaryCaps(conv1, kernel_size=9, stride=2)

        # DigitCaps layer, return shape [batch_size, 10, 16, 1]
        with tf.variable_scope('DigitCaps_layer'):
            digitCaps = CapsLayer(num_outputs=self.num_label, vec_len=16, with_routing=True, layer_type='FC')
            self.caps2 = digitCaps(caps1)

        # Decoder structure in Fig. 2
        # 1. Do masking:
        with tf.variable_scope('Masking'):
            # a) compute ||v_c||, then softmax(||v_c||)
            # [batch_size, 10, 16, 1] => [batch_size, 10, 1, 1]
            self.v_length = tf.sqrt(reduce_sum(tf.square(self.caps2),
                                               axis=2, keepdims=True) + epsilon)
            self.softmax_v = softmax(self.v_length, axis=1)
            # assert self.softmax_v.get_shape() == [cfg.batch_size, self.num_label, 1, 1]

            # b) pick the index of the max softmax value among the 10 capsules
            # [batch_size, 10, 1, 1] => [batch_size] (index)
            self.argmax_idx = tf.to_int32(tf.argmax(self.softmax_v, axis=1))
            # assert self.argmax_idx.get_shape() == [cfg.batch_size, 1, 1]
            self.argmax_idx = tf.reshape(self.argmax_idx, shape=(cfg.batch_size, ))

            # Method 1.
            if not cfg.mask_with_y:
                # c) indexing: gather each example's argmax capsule with a
                # plain Python loop, which is easier to follow than fancy
                # tensor indexing.
                masked_v = []
                for b in range(cfg.batch_size):
                    v = self.caps2[b][self.argmax_idx[b], :]
                    masked_v.append(tf.reshape(v, shape=(1, 1, 16, 1)))

                self.masked_v = tf.concat(masked_v, axis=0)
                assert self.masked_v.get_shape() == [cfg.batch_size, 1, 16, 1]
            # Method 2. masking with true label, default mode
            else:
                self.masked_v = tf.multiply(tf.squeeze(self.caps2), tf.reshape(self.Y, (-1, self.num_label, 1)))
                self.v_length = tf.sqrt(reduce_sum(tf.square(self.caps2), axis=2, keepdims=True) + epsilon)

        # 2. Reconstruct the MNIST images with 3 FC layers
        # [batch_size, 1, 16, 1] => [batch_size, 16] => [batch_size, 512]
        with tf.variable_scope('Decoder'):
            vector_j = tf.reshape(self.masked_v, shape=(cfg.batch_size, -1))
            fc1 = tf.contrib.layers.fully_connected(vector_j, num_outputs=512)
            fc2 = tf.contrib.layers.fully_connected(fc1, num_outputs=1024)
            self.decoded = tf.contrib.layers.fully_connected(fc2,
                                                             num_outputs=self.height * self.width * self.channels,
                                                             activation_fn=tf.sigmoid) 
Developer ID: naturomics, Project: CapsNet-Tensorflow, Lines of code: 61, Source file: capsNet.py
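The per-example Python loop in Method 1 can also be written as a vectorized one-hot mask. The sketch below, in the same TF1-style API and reusing the names from the method above, should reproduce the loop's result, but it is an alternative formulation, not code from the repository.

# mask: [batch_size, num_label] -> [batch_size, num_label, 1, 1]
mask = tf.one_hot(self.argmax_idx, depth=self.num_label)
mask = tf.reshape(mask, shape=(cfg.batch_size, self.num_label, 1, 1))
# Zero every capsule except the argmax one, then sum that axis away:
# [batch_size, num_label, 16, 1] -> [batch_size, 1, 16, 1]
self.masked_v = tf.reduce_sum(self.caps2 * mask, axis=1, keepdims=True)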

Example 6: get_dilation_model_voc

# Required import: import utils [as alias]
# Or: from utils import softmax [as alias]
def get_dilation_model_voc(input_shape, apply_softmax, input_tensor, classes):

    if input_tensor is None:
        model_in = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            model_in = Input(tensor=input_tensor, shape=input_shape)
        else:
            model_in = input_tensor

    h = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(model_in)
    h = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(h)
    h = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(h)
    h = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(h)
    h = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(h)
    h = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(h)
    h = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(h)
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(h)
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(h)
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_1')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_2')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_3')(h)
    h = AtrousConvolution2D(4096, 7, 7, atrous_rate=(4, 4), activation='relu', name='fc6')(h)
    h = Dropout(0.5, name='drop6')(h)
    h = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(h)
    h = Dropout(0.5, name='drop7')(h)
    h = Convolution2D(classes, 1, 1, activation='relu', name='fc-final')(h)
    h = ZeroPadding2D(padding=(33, 33))(h)
    h = Convolution2D(2 * classes, 3, 3, activation='relu', name='ct_conv1_1')(h)
    h = Convolution2D(2 * classes, 3, 3, activation='relu', name='ct_conv1_2')(h)
    h = AtrousConvolution2D(4 * classes, 3, 3, atrous_rate=(2, 2), activation='relu', name='ct_conv2_1')(h)
    h = AtrousConvolution2D(8 * classes, 3, 3, atrous_rate=(4, 4), activation='relu', name='ct_conv3_1')(h)
    h = AtrousConvolution2D(16 * classes, 3, 3, atrous_rate=(8, 8), activation='relu', name='ct_conv4_1')(h)
    h = AtrousConvolution2D(32 * classes, 3, 3, atrous_rate=(16, 16), activation='relu', name='ct_conv5_1')(h)
    h = Convolution2D(32 * classes, 3, 3, activation='relu', name='ct_fc1')(h)
    logits = Convolution2D(classes, 1, 1, name='ct_final')(h)

    if apply_softmax:
        model_out = softmax(logits)
    else:
        model_out = logits

    model = Model(input=model_in, output=model_out, name='dilation_voc12')

    return model


# KITTI MODEL 
Developer ID: DavideA, Project: dilation-keras, Lines of code: 54, Source file: dilation_net.py
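Here softmax is imported from the project's local utils and applied to a 4-D feature map, i.e. a softmax over the class channel at every spatial position. A minimal sketch of such a layer with the Keras backend follows; the actual helper in dilation-keras may be implemented differently, and the axis depends on the dim ordering in use (channels-last is assumed here).

import keras.backend as K
from keras.layers import Lambda

def channel_softmax(x):
    # Softmax over the last (class) axis of a (batch, h, w, classes) map.
    e = K.exp(x - K.max(x, axis=-1, keepdims=True))
    return e / K.sum(e, axis=-1, keepdims=True)

softmax = Lambda(channel_softmax, name='softmax')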

Example 7: get_dilation_model_kitti

# Required import: import utils [as alias]
# Or: from utils import softmax [as alias]
def get_dilation_model_kitti(input_shape, apply_softmax, input_tensor, classes):

    if input_tensor is None:
        model_in = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            model_in = Input(tensor=input_tensor, shape=input_shape)
        else:
            model_in = input_tensor

    h = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(model_in)
    h = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(h)
    h = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(h)
    h = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(h)
    h = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(h)
    h = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(h)
    h = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(h)
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(h)
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(h)
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_1')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_2')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_3')(h)
    h = AtrousConvolution2D(4096, 7, 7, atrous_rate=(4, 4), activation='relu', name='fc6')(h)
    h = Dropout(0.5, name='drop6')(h)
    h = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(h)
    h = Dropout(0.5, name='drop7')(h)
    h = Convolution2D(classes, 1, 1, name='final')(h)
    h = ZeroPadding2D(padding=(1, 1))(h)
    h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_1')(h)
    h = ZeroPadding2D(padding=(1, 1))(h)
    h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_2')(h)
    h = ZeroPadding2D(padding=(2, 2))(h)
    h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(2, 2), activation='relu', name='ctx_conv2_1')(h)
    h = ZeroPadding2D(padding=(4, 4))(h)
    h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(4, 4), activation='relu', name='ctx_conv3_1')(h)
    h = ZeroPadding2D(padding=(8, 8))(h)
    h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(8, 8), activation='relu', name='ctx_conv4_1')(h)
    h = ZeroPadding2D(padding=(1, 1))(h)
    h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_fc1')(h)
    logits = Convolution2D(classes, 1, 1, name='ctx_final')(h)

    if apply_softmax:
        model_out = softmax(logits)
    else:
        model_out = logits

    model = Model(input=model_in, output=model_out, name='dilation_kitti')

    return model


# CAMVID MODEL 
Developer ID: DavideA, Project: dilation-keras, Lines of code: 58, Source file: dilation_net.py

Example 8: get_dilation_model_camvid

# Required import: import utils [as alias]
# Or: from utils import softmax [as alias]
def get_dilation_model_camvid(input_shape, apply_softmax, input_tensor, classes):

    if input_tensor is None:
        model_in = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            model_in = Input(tensor=input_tensor, shape=input_shape)
        else:
            model_in = input_tensor

    h = Convolution2D(64, 3, 3, activation='relu', name='conv1_1')(model_in)
    h = Convolution2D(64, 3, 3, activation='relu', name='conv1_2')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool1')(h)
    h = Convolution2D(128, 3, 3, activation='relu', name='conv2_1')(h)
    h = Convolution2D(128, 3, 3, activation='relu', name='conv2_2')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool2')(h)
    h = Convolution2D(256, 3, 3, activation='relu', name='conv3_1')(h)
    h = Convolution2D(256, 3, 3, activation='relu', name='conv3_2')(h)
    h = Convolution2D(256, 3, 3, activation='relu', name='conv3_3')(h)
    h = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='pool3')(h)
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_1')(h)
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_2')(h)
    h = Convolution2D(512, 3, 3, activation='relu', name='conv4_3')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_1')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_2')(h)
    h = AtrousConvolution2D(512, 3, 3, atrous_rate=(2, 2), activation='relu', name='conv5_3')(h)
    h = AtrousConvolution2D(4096, 7, 7, atrous_rate=(4, 4), activation='relu', name='fc6')(h)
    h = Dropout(0.5, name='drop6')(h)
    h = Convolution2D(4096, 1, 1, activation='relu', name='fc7')(h)
    h = Dropout(0.5, name='drop7')(h)
    h = Convolution2D(classes, 1, 1, name='final')(h)
    h = ZeroPadding2D(padding=(1, 1))(h)
    h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_1')(h)
    h = ZeroPadding2D(padding=(1, 1))(h)
    h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_conv1_2')(h)
    h = ZeroPadding2D(padding=(2, 2))(h)
    h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(2, 2), activation='relu', name='ctx_conv2_1')(h)
    h = ZeroPadding2D(padding=(4, 4))(h)
    h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(4, 4), activation='relu', name='ctx_conv3_1')(h)
    h = ZeroPadding2D(padding=(8, 8))(h)
    h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(8, 8), activation='relu', name='ctx_conv4_1')(h)
    h = ZeroPadding2D(padding=(16, 16))(h)
    h = AtrousConvolution2D(classes, 3, 3, atrous_rate=(16, 16), activation='relu', name='ctx_conv5_1')(h)
    h = ZeroPadding2D(padding=(1, 1))(h)
    h = Convolution2D(classes, 3, 3, activation='relu', name='ctx_fc1')(h)
    logits = Convolution2D(classes, 1, 1, name='ctx_final')(h)

    if apply_softmax:
        model_out = softmax(logits)
    else:
        model_out = logits

    model = Model(input=model_in, output=model_out, name='dilation_camvid')

    return model


# model function 
Developer ID: DavideA, Project: dilation-keras, Lines of code: 60, Source file: dilation_net.py
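Across all three builders, apply_softmax toggles between returning raw logits (useful when the training loss applies softmax itself) and per-pixel class probabilities. A hypothetical usage sketch follows; the input shape, class count, and dim ordering are illustrative assumptions, not the repository's defaults.

import numpy as np

# Hypothetical: 21 classes (PASCAL VOC) and a 500x500 RGB input,
# assuming channels-last dim ordering.
model = get_dilation_model_voc(input_shape=(500, 500, 3), apply_softmax=True,
                               input_tensor=None, classes=21)
probs = model.predict(np.zeros((1, 500, 500, 3), dtype=np.float32))
# With apply_softmax=True, the class scores at each output pixel sum to 1.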


Note: The utils.softmax examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please consult each project's license before redistributing or reusing the code, and do not repost without permission.