

Python activations.softmax Method Code Examples

This article collects typical usage examples of the Python method keras.activations.softmax. If you are unsure what activations.softmax does, how to call it, or how others use it, the curated examples below may help. You can also explore further usage examples from the containing module, keras.activations.


The following presents 15 code examples of the activations.softmax method, sorted by popularity.
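
As a quick orientation before the examples: keras.activations.softmax(x, axis=-1) turns a tensor of scores into a probability distribution along the given axis. Below is a minimal sketch, assuming a recent Keras 2.x (the axis argument is not available in very old releases); the input values are purely illustrative.

import numpy as np
from keras import backend as K
from keras.activations import softmax

# A batch of two 3-class logit vectors.
logits = K.constant(np.array([[1.0, 2.0, 3.0],
                              [1.0, 1.0, 1.0]]))
probs = softmax(logits, axis=-1)  # normalize along the last (class) axis
print(K.eval(probs))              # each row sums to 1, e.g. ~[0.09, 0.24, 0.67]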

Example 1: nn_model

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def nn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize pixel values to [0, 1]
    x_train = x_train.reshape(x_train.shape[0], -1) / 255.
    # One-hot encode the labels
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)
    # constant(value=1.) is a custom constant initializer; constant(value=1.) is equivalent to one()
    # Build the model: 784 input neurons, 10 output neurons
    model = Sequential([
        Dense(units=200, input_dim=784, bias_initializer=constant(value=1.), activation=tanh),
        Dense(units=100, bias_initializer=one(), activation=tanh),
        Dense(units=10, bias_initializer=one(), activation=softmax),
    ])

    opt = SGD(lr=0.2, clipnorm=1.)  # optimizer
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['acc', 'mae'])  # compile the model
    model.fit(x_train, y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Developer: jtyoui, Project: Jtyoui, Lines: 20, Source: HandWritingRecognition.py
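
Note that model_save above is a helper from the Jtyoui project, not part of Keras; assuming it wraps the standard model.save, the trained network can later be restored with the stock Keras loader:

from keras.models import load_model

model = load_model('./model.h5')  # path taken from the example above
model.summary()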

Example 2: test_softmax

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def test_softmax():

    from keras.activations import softmax as s

    # Test using a reference implementation of softmax
    def softmax(values):
        m = max(values)
        values = numpy.array(values)
        e = numpy.exp(values - m)
        dist = list(e / numpy.sum(e))

        return dist

    x = T.vector()
    exp = s(x)
    f = theano.function([x], exp)
    test_values = get_standard_values()

    result = f(test_values)
    expected = softmax(test_values)

    print(str(result))
    print(str(expected))

    list_assert_equal(result, expected) 
Developer: lllcho, Project: CAPTCHA-breaking, Lines: 27, Source: test_activations.py
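
The max-subtraction in the reference implementation is the standard numerical-stability trick and does not change the result, since exp(x - m) / sum(exp(x - m)) = exp(x) / sum(exp(x)). A quick NumPy check with illustrative values:

import numpy as np

def softmax_stable(v):
    e = np.exp(v - np.max(v))  # shift by the max to avoid overflow
    return e / e.sum()

def softmax_naive(v):
    e = np.exp(v)
    return e / e.sum()

v = np.array([0.1, 2.0, -1.5])
assert np.allclose(softmax_stable(v), softmax_naive(v))
print(softmax_stable(np.array([1000.0, 1000.5])))  # the naive version overflows here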

Example 3: step

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def step(self, x_input, states):
    	#print "x_input:", x_input, x_input.shape
    	# <TensorType(float32, matrix)>
    	
        input_shape = self.input_spec[0].shape
        en_seq = states[-1]
        _, [h, c] = super(PointerLSTM, self).step(x_input, states[:-1])

        # vt*tanh(W1*e+W2*d)
        dec_seq = K.repeat(h, input_shape[1])
        Eij = time_distributed_dense(en_seq, self.W1, output_dim=1)
        Dij = time_distributed_dense(dec_seq, self.W2, output_dim=1)
        U = self.vt * tanh(Eij + Dij)
        U = K.squeeze(U, 2)

        # make probability tensor
        pointer = softmax(U)
        return pointer, [h, c] 
Developer: zygmuntz, Project: pointer-networks-experiments, Lines: 20, Source: PointerLSTM.py
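
For reference, the scoring inside step implements the pointer-network attention u_i = v^T tanh(W1 e_i + W2 d), followed by a softmax over the encoder positions. A minimal NumPy sketch of that computation (shapes and names are illustrative, not taken from the original project):

import numpy as np

def pointer_attention(enc_seq, dec_state, W1, W2, v):
    # enc_seq: (T, d_e), dec_state: (d_d,) -> distribution over the T positions
    u = np.tanh(enc_seq @ W1 + dec_state @ W2) @ v  # (T,)
    e = np.exp(u - u.max())
    return e / e.sum()                              # softmax over encoder steps

rng = np.random.default_rng(0)
attn = pointer_attention(rng.normal(size=(5, 8)), rng.normal(size=8),
                         rng.normal(size=(8, 16)), rng.normal(size=(8, 16)),
                         rng.normal(size=16))
print(attn, attn.sum())  # probabilities over the 5 input positions; sums to 1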

Example 4: __init__

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def __init__(self, coords=4, classes=20, num=1,
            log=0, sqrt=0, softmax=0, background=0, max=30,
            jitter=0.2, 
            rescore = 0, thresh=0.5, classfix=0, absolute=0, random=0,
            coord_scale=1, object_scale=1,
            noobject_scale=1, class_scale=1,
            bias_match=0,
            tree=None,  # tree file for softmax_tree - not used now
            map_filename=None,  # file name for map_file - not used
            anchors=None,
            **kwargs
            ):
        super(Region, self).__init__(**kwargs)
        self.coords = coords
        self.classes = classes
        self.num = num
        self.background = background
        print(coords, classes)
        self.c = (self.coords+self.classes+1)*num
        if anchors:
            self.biases = list(map(float, anchors))
        pass 
Developer: BrainsGarden, Project: keras-yolo, Lines: 24, Source: region.py

Example 5: _process_input

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def _process_input(self, x):
        """Apply logistic and softmax activations to input tensor
        """
        logistic_activate = lambda x: 1.0/(1.0 + K.exp(-x))
        
        (batch, w, h, channels) = x.get_shape()
        x_temp = K.permute_dimensions(x, (3, 0, 1, 2))
        x_t = []
        for i in range(self.num):
            k = self._entry_index(i, 0)
            x_t.extend([
                logistic_activate(K.gather(x_temp, (k, k + 1))), # 0
                K.gather(x_temp, (k + 2, k + 3))])
            if self.background:
                x_t.append(K.gather(x_temp, (k + 4,)))
            else:
                x_t.append(logistic_activate(K.gather(x_temp, (k + 4,))))
                
            x_t.append(
                softmax(
                    K.gather(x_temp, tuple(range(k + 5, k + self.coords + self.classes + 1))),
                    axis=0))
        x_t = K.concatenate(x_t, axis=0)
        return K.permute_dimensions(x_t, (1, 2, 3, 0)) 
Developer: BrainsGarden, Project: keras-yolo, Lines: 26, Source: region.py
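
Because _process_input first permutes the channel axis to the front, the class softmax runs along axis=0, i.e. across the gathered class channels for every spatial position. A NumPy sketch of the same per-position normalization (shapes are illustrative):

import numpy as np

# (classes, batch, h, w): class scores stacked on the leading axis,
# mirroring the permuted layout inside _process_input.
scores = np.random.randn(20, 2, 7, 7)
e = np.exp(scores - scores.max(axis=0, keepdims=True))
class_probs = e / e.sum(axis=0, keepdims=True)
print(class_probs.sum(axis=0).round(3))  # all ones: one distribution per cell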

Example 6: _compute_probabilities

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def _compute_probabilities(self, energy, previous_attention=None):
        if self.is_monotonic:
            # add pre-sigmoid noise to encourage discreteness
            sigmoid_noise = K.in_train_phase(1., 0.)
            noise = K.random_normal(K.shape(energy), mean=0.0, stddev=sigmoid_noise)
            # encourage discreteness in train
            energy = K.in_train_phase(energy + noise, energy)

            p = K.in_train_phase(K.sigmoid(energy),
                                 K.cast(energy > 0, energy.dtype))
            p = K.squeeze(p, -1)
            p_prev = K.squeeze(previous_attention, -1)
            # monotonic attention function from tensorflow
            at = K.in_train_phase(
                tf.contrib.seq2seq.monotonic_attention(p, p_prev, 'parallel'),
                tf.contrib.seq2seq.monotonic_attention(p, p_prev, 'hard'))
            at = K.expand_dims(at, -1)
        else:
            # softmax
            at = keras.activations.softmax(energy, axis=1)

        return at 
Developer: asmekal, Project: keras-monotonic-attention, Lines: 24, Source: attention_decoder.py
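
In the non-monotonic branch, the softmax runs over axis=1, the time axis of the (batch, timesteps, 1) energy tensor, so the weights form a distribution over input positions rather than across the batch. A minimal check of that axis choice (the axis argument assumes a recent Keras 2.x):

import numpy as np
from keras import backend as K
from keras.activations import softmax

energy = K.constant(np.random.randn(2, 6, 1))  # (batch, timesteps, 1)
at = softmax(energy, axis=1)                   # distribution over the 6 timesteps
print(K.eval(K.sum(at, axis=1)))               # each batch entry sums to 1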

Example 7: _get_weight_vector

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def _get_weight_vector(self, M, w_tm1, k, beta, g, s, gamma):
#        M = tf.Print(M, [M, w_tm1, k], message='get weights beg1: ')
#        M = tf.Print(M, [beta, g, s, gamma], message='get weights beg2: ')
        # Content addressing, see Chapter 3.3.1:
        num = beta * _cosine_distance(M, k)
        w_c = K.softmax(num)  # It turns out that equation (5) is just softmax.
        # Location addressing, see Chapter 3.3.2:
        # Equation 7:
        w_g = (g * w_c) + (1-g)*w_tm1
        # C_s is the circular convolution
        #C_w = K.sum((self.C[None, :, :, :] * w_g[:, None, None, :]),axis=3)
        # Equation 8:
        # TODO: Explain
        C_s = K.sum(K.repeat_elements(self.C[None, :, :, :], self.batch_size, axis=0) * s[:,:,None,None], axis=1)
        w_tilda = K.batch_dot(C_s, w_g)
        # Equation 9:
        w_out = _renorm(w_tilda ** gamma)

        return w_out 
Developer: flomlo, Project: ntm_keras, Lines: 21, Source: ntm.py
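
Equation (5) of the NTM paper (content addressing) is a softmax over the beta-scaled cosine similarities between the key k and each memory row, which is why the code reduces to K.softmax(num). A NumPy sketch of that equation without batching (shapes are illustrative):

import numpy as np

def content_address(M, k, beta):
    # M: (slots, width) memory, k: (width,) key -> (slots,) read weights
    sim = M @ k / (np.linalg.norm(M, axis=1) * np.linalg.norm(k) + 1e-8)
    e = np.exp(beta * sim - np.max(beta * sim))
    return e / e.sum()  # softmax over memory slots

M = np.random.randn(128, 20)
w_c = content_address(M, np.random.randn(20), beta=5.0)
print(w_c.argmax(), w_c.sum())  # index of the most similar slot; weights sum to 1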

Example 8: createModel

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def createModel(patchSize, patchSize_down=None, ScaleFactor=1, learningRate=1e-3, optimizer='SGD',
                     dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
    # Total params: 453,570
    input_orig = Input(shape=(1, int(patchSize[0]), int(patchSize[1])))
    path_orig_output = fConveBlock(input_orig)
    input_down = Input(shape=(1, int(patchSize_down[0]), int(patchSize_down[1])))
    path_down = fConveBlock(input_down)
    path_down_output = fUpSample(path_down, ScaleFactor)
    multi_scale_connect = fconcatenate(path_orig_output, path_down_output)

    # fully connected layer as dense
    flat_out = Flatten()(multi_scale_connect)
    dropout_out = Dropout(dr_rate)(flat_out)
    dense_out = Dense(units=2,
                          kernel_initializer='normal',
                          kernel_regularizer=l2(l2_reg))(dropout_out)
    # Fully connected layer as a 1x1 convolution?

    output_fc1 = Activation('softmax')(dense_out)
    output_fc2 = Activation('softmax')(dense_out)
    output_p1 = Lambda(sliceP1, name='path1_output', output_shape=(None, 2))(output_fc1)
    output_p2 = Lambda(sliceP2, name='path2_output', output_shape=(None, 2))(output_fc2)
    cnn_ms = Model(inputs=[input_orig, input_down], outputs=[output_p1,output_p2])
    return cnn_ms 
Developer: thomaskuestner, Project: CNNArt, Lines: 26, Source: motion_all_CNN2D_multiscale.py

Example 9: get_model_lstm

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def get_model_lstm():
    nclass = 5

    seq_input = Input(shape=(None, 3000, 1))
    base_model = get_base_model()
    for layer in base_model.layers:
        layer.trainable = False
    encoded_sequence = TimeDistributed(base_model)(seq_input)
    encoded_sequence = Bidirectional(LSTM(100, return_sequences=True))(encoded_sequence)
    encoded_sequence = Dropout(rate=0.5)(encoded_sequence)
    encoded_sequence = Bidirectional(LSTM(100, return_sequences=True))(encoded_sequence)
    #out = TimeDistributed(Dense(nclass, activation="softmax"))(encoded_sequence)
    out = Convolution1D(nclass, kernel_size=1, activation="softmax", padding="same")(encoded_sequence)

    model = models.Model(seq_input, out)

    model.compile(optimizers.Adam(0.001), losses.sparse_categorical_crossentropy, metrics=['acc'])
    model.summary()

    return model 
Developer: CVxTz, Project: EEG_classification, Lines: 22, Source: models.py

Example 10: iris

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def iris():

    from keras.optimizers import Adam, Nadam
    from keras.losses import logcosh, categorical_crossentropy
    from keras.activations import relu, elu, softmax

    # here we use a standard Python dictionary to define the parameter boundaries
    p = {'lr': (0.5, 5, 10),
         'first_neuron': [4, 8, 16, 32, 64],
         'hidden_layers': [0, 1, 2, 3, 4],
         'batch_size': (2, 30, 10),
         'epochs': [2],
         'dropout': (0, 0.5, 5),
         'weight_regulizer': [None],
         'emb_output_dims':  [None],
         'shapes': ['brick', 'triangle', 0.2],
         'optimizer': [Adam, Nadam],
         'losses': [logcosh, categorical_crossentropy],
         'activation': [relu, elu],
         'last_activation': [softmax]}

    return p 
Developer: autonomio, Project: talos, Lines: 24, Source: params.py
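
Note that this parameter grid passes the activation functions themselves (relu, elu, softmax) rather than their string names; Keras layers accept either form. A minimal illustration:

from keras.activations import softmax
from keras.layers import Dense

# Equivalent layer definitions: string name vs. the callable itself.
layer_by_name = Dense(10, activation='softmax')
layer_by_func = Dense(10, activation=softmax)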

Example 11: cnn_model

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def cnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize pixel values to [0, 1]
    x_train = x_train.reshape(-1, 28, 28, 1) / 255.
    # One-hot encode the labels
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        # input_shape: shape of the input, set only on the first layer
        # filters: number of convolution kernels (filters)
        # kernel_size: size of each convolution kernel
        # strides: stride of the convolution
        # padding: one of 'same' or 'valid'
        # activation: activation function
        Convolution2D(input_shape=(28, 28, 1), filters=32, kernel_size=5, strides=1, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Convolution2D(filters=64, kernel_size=5, padding='same', activation=relu),
        MaxPool2D(pool_size=2, strides=2, padding='same'),
        Flatten(),  # flatten the feature maps to a vector
        Dense(units=1024, activation=relu),
        Dropout(0.5),
        Dense(units=10, activation=softmax),
    ])
    opt = Adam(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Developer: jtyoui, Project: Jtyoui, Lines: 29, Source: HandWritingRecognition.py

Example 12: rnn_model

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def rnn_model():
    (x_train, y_train), _ = mnist.load_data()
    # Normalize pixel values to [0, 1]
    x_train = x_train / 255.
    # One-hot encode the labels
    y_train = np_utils.to_categorical(y=y_train, num_classes=10)

    model = Sequential([
        SimpleRNN(units=50, input_shape=(28, 28)),
        Dense(units=10, activation=softmax),
    ])
    opt = RMSprop(lr=1e-4)
    model.compile(optimizer=opt, loss=categorical_crossentropy, metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, batch_size=64, epochs=20, callbacks=[RemoteMonitor()])
    model_save(model, './model.h5') 
Developer: jtyoui, Project: Jtyoui, Lines: 17, Source: HandWritingRecognition.py

Example 13: build

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def build(self):
        """
        Build model structure.

        aNMM model based on bin weighting and query term attentions
        """
        # query is [batch_size, left_text_len]
        # doc is [batch_size, right_text_len, bin_num]
        query, doc = self._make_inputs()
        embedding = self._make_embedding_layer()

        q_embed = embedding(query)
        q_attention = keras.layers.Dense(
            1, kernel_initializer=RandomUniform(), use_bias=False)(q_embed)
        q_text_len = self._params['input_shapes'][0][0]

        q_attention = keras.layers.Lambda(
            lambda x: softmax(x, axis=1),
            output_shape=(q_text_len,)
        )(q_attention)
        d_bin = keras.layers.Dropout(
            rate=self._params['dropout_rate'])(doc)
        for layer_id in range(self._params['num_layers'] - 1):
            d_bin = keras.layers.Dense(
                self._params['hidden_sizes'][layer_id],
                kernel_initializer=RandomUniform())(d_bin)
            d_bin = keras.layers.Activation('tanh')(d_bin)
        d_bin = keras.layers.Dense(
            self._params['hidden_sizes'][self._params['num_layers'] - 1])(
            d_bin)
        d_bin = keras.layers.Reshape((q_text_len,))(d_bin)
        q_attention = keras.layers.Reshape((q_text_len,))(q_attention)
        score = keras.layers.Dot(axes=[1, 1])([d_bin, q_attention])
        x_out = self._make_output_layer()(score)
        self._backend = keras.Model(inputs=[query, doc], outputs=x_out) 
Developer: NTMC-Community, Project: MatchZoo, Lines: 37, Source: anmm.py

Example 14: fCreateModel

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
                 dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
    l2_reg = 1e-4
    # using SGD lr 0.001
    # motion_head: uncorrected version, 3 steps with only type(1,1,1) (149K params) --> val_loss: 0.2157 - val_acc: 0.9230
    # motion_head: corrected version, type(1,2,2) (266K params) --> val_loss: 0.2336 - val_acc: 0.9149 after early abort...
    # doubled #channels (type 122) (870,882 params)
    # functional api...
    input_t = Input(shape=(1, int(patchSize[0, 0]), int(patchSize[0, 1]), int(patchSize[0, 2])))

    after_res1_t = fCreateVNet_Block(input_t, 32, type=2, iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_DownConv1_t = fCreateVNet_DownConv_Block(after_res1_t, after_res1_t._keras_shape[1], (2, 2, 2),
                                                   iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_res2_t = fCreateVNet_Block(after_DownConv1_t, 64, type=2, iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_DownConv2_t = fCreateVNet_DownConv_Block(after_res2_t, after_res2_t._keras_shape[1], (2, 2, 1),
                                                   iPReLU=iPReLU, l2_reg=l2_reg, dr_rate=dr_rate)

    after_res3_t = fCreateVNet_Block(after_DownConv2_t, 128, type=2, iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_DownConv3_t = fCreateVNet_DownConv_Block(after_res3_t, after_res3_t._keras_shape[1], (2, 2, 1),
                                                   iPReLU=iPReLU, l2_reg=l2_reg, dr_rate=dr_rate)

    after_flat_t = Flatten()(after_DownConv3_t)
    after_dense_t = Dropout(dr_rate)(after_flat_t)
    after_dense_t = Dense(units=2,
                          kernel_initializer='normal',
                          kernel_regularizer=l2(l2_reg))(after_dense_t)
    output_t = Activation('softmax')(after_dense_t)

    cnn = Model(inputs=[input_t], outputs=[output_t])

    opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)  # loss: categorical_crossentropy by default
    cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
    sArchiSpecs = '_t222_l2{}_dr{}'.format(l2_reg, dr_rate)
    return cnn
Developer: thomaskuestner, Project: CNNArt, Lines: 36, Source: VNetArt.py

Example 15: fCreateModel

# Required import: from keras import activations [as alias]
# Or: from keras.activations import softmax [as alias]
def fCreateModel(patchSize, learningRate=1e-3, optimizer='SGD',
                     dr_rate=0.0, input_dr_rate=0.0, max_norm=5, iPReLU=0, l2_reg=1e-6):
    l2_reg = 1e-4
    # using SGD lr 0.001
    # motion_head: uncorrected version, 3 steps with only type(1,1,1) (149K params) --> val_loss: 0.2157 - val_acc: 0.9230
    # motion_head: corrected version, type(1,2,2) (266K params) --> val_loss: 0.2336 - val_acc: 0.9149 after early abort...
    # doubled #channels (type 122) (870,882 params)
    # functional api...
    input_t = Input(shape=(1, int(patchSize[0]), int(patchSize[1]), int(patchSize[2])))

    after_res1_t = fCreateVNet_Block(input_t, 32, type=2, iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_DownConv1_t = fCreateVNet_DownConv_Block(after_res1_t, after_res1_t._keras_shape[1], (2, 2, 2),
                                                     iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)

    after_res2_t = fCreateVNet_Block(after_DownConv1_t, 64, type=2, iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_DownConv2_t = fCreateVNet_DownConv_Block(after_res2_t, after_res2_t._keras_shape[1], (2, 2, 1),
                                                     iPReLU=iPReLU, l2_reg=l2_reg, dr_rate=dr_rate)

    after_res3_t = fCreateVNet_Block(after_DownConv2_t, 128, type=2, iPReLU=iPReLU, dr_rate=dr_rate, l2_reg=l2_reg)
    after_DownConv3_t = fCreateVNet_DownConv_Block(after_res3_t, after_res3_t._keras_shape[1], (2, 2, 1),
                                                     iPReLU=iPReLU, l2_reg=l2_reg, dr_rate=dr_rate)

    after_flat_t = Flatten()(after_DownConv3_t)
    after_dense_t = Dropout(dr_rate)(after_flat_t)
    after_dense_t = Dense(units=2,
                          kernel_initializer='normal',
                          kernel_regularizer=l2(l2_reg))(after_dense_t)
    output_t = Activation('softmax')(after_dense_t)

    cnn = Model(inputs=[input_t], outputs=[output_t])

    opti, loss = fGetOptimizerAndLoss(optimizer, learningRate=learningRate)  # loss: categorical_crossentropy by default
    cnn.compile(optimizer=opti, loss=loss, metrics=['accuracy'])
    sArchiSpecs = '_t222_l2{}_dr{}'.format(l2_reg, dr_rate)
    return cnn 
Developer: thomaskuestner, Project: CNNArt, Lines: 37, Source: motion_VNetArt.py


Note: the keras.activations.softmax method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by various developers; copyright remains with the original authors. Please consult each project's license before redistributing or using the code, and do not reproduce this article without permission.