

Python models.Graph Method Code Examples

This article collects typical usage examples of the Python keras.models.Graph method. If you are unsure how models.Graph is used in practice, or want concrete examples of it, the curated code samples below may help. You can also explore further usage examples from the keras.models module.


The 13 code examples of models.Graph shown below are sorted by popularity by default.
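For orientation, here is a minimal sketch of the legacy Graph API as it appears in the examples that follow. This API belongs to Keras 0.x; the Graph container was removed in later Keras releases in favor of the functional Model API. Layer sizes and node names are illustrative only and mirror the examples below.

from keras.models import Graph
from keras.layers.core import Dense

graph = Graph()
graph.add_input(name='input1', ndim=2)                         # declare a named input
graph.add_node(Dense(32, 16), name='dense1', input='input1')   # Dense(input_dim, output_dim) in Keras 0.x
graph.add_output(name='output1', input='dense1')               # expose a node as a named output
graph.compile('rmsprop', {'output1': 'mse'})                   # one loss per named output
# graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)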

Example 1: test_1o_1i

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def test_1o_1i(self):
        print('test a non-sequential graph with 1 input and 1 output')
        graph = Graph()
        graph.add_input(name='input1', ndim=2)

        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 4), name='dense3', input='dense1')

        graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
        graph.compile('rmsprop', {'output1': 'mse'})

        history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
        out = graph.predict({'input1': X_test})
        assert(type(out) == dict)
        assert(len(out) == 1)
        loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
        loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
        loss = graph.evaluate({'input1': X_test, 'output1': y_test})
        print(loss)
        assert(loss < 2.5) 
Developer: lllcho, Project: CAPTCHA-breaking, Lines: 23, Source file: test_graph_model.py

Example 2: test_1o_1i_2

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def test_1o_1i_2(self):
        print('test a more complex non-sequential graph with 1 input and 1 output')
        graph = Graph()
        graph.add_input(name='input1', ndim=2)

        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2-0', input='input1')
        graph.add_node(Activation('relu'), name='dense2', input='dense2-0')

        graph.add_node(Dense(4, 16), name='dense3', input='dense2')
        graph.add_node(Dense(16, 4), name='dense4', inputs=['dense1', 'dense3'], merge_mode='sum')

        graph.add_output(name='output1', inputs=['dense2', 'dense4'], merge_mode='sum')
        graph.compile('rmsprop', {'output1': 'mse'})

        history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
        out = graph.predict({'input1': X_train})
        assert(type(out) == dict)
        assert(len(out) == 1)
        loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
        loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
        loss = graph.evaluate({'input1': X_test, 'output1': y_test})
        print(loss)
        assert(loss < 2.5)
        graph.get_config(verbose=1) 
Developer: lllcho, Project: CAPTCHA-breaking, Lines: 27, Source file: test_graph_model.py

Example 3: test_1o_2i

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def test_1o_2i(self):
        print('test a non-sequential graph with 2 inputs and 1 output')
        graph = Graph()
        graph.add_input(name='input1', ndim=2)
        graph.add_input(name='input2', ndim=2)

        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input2')
        graph.add_node(Dense(16, 4), name='dense3', input='dense1')

        graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
        graph.compile('rmsprop', {'output1': 'mse'})

        history = graph.fit({'input1': X_train, 'input2': X2_train, 'output1': y_train}, nb_epoch=10)
        out = graph.predict({'input1': X_test, 'input2': X2_test})
        assert(type(out) == dict)
        assert(len(out) == 1)
        loss = graph.test_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
        loss = graph.train_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
        loss = graph.evaluate({'input1': X_test, 'input2': X2_test, 'output1': y_test})
        print(loss)
        assert(loss < 3.0)
        graph.get_config(verbose=1) 
Developer: lllcho, Project: CAPTCHA-breaking, Lines: 25, Source file: test_graph_model.py

Example 4: test_recursive

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def test_recursive(self):
        print('test layer-like API')

        graph = containers.Graph()
        graph.add_input(name='input1', ndim=2)
        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 4), name='dense3', input='dense1')
        graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')

        seq = Sequential()
        seq.add(Dense(32, 32, name='first_seq_dense'))
        seq.add(graph)
        seq.add(Dense(4, 4, name='last_seq_dense'))

        seq.compile('rmsprop', 'mse')

        history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
        loss = seq.evaluate(X_test, y_test)
        print(loss)
        assert(loss < 2.5)

        loss = seq.evaluate(X_test, y_test, show_accuracy=True)
        pred = seq.predict(X_test)
        seq.get_config(verbose=1) 
Developer: lllcho, Project: CAPTCHA-breaking, Lines: 27, Source file: test_graph_model.py

Example 5: create_res_texture_net

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def create_res_texture_net(input_rows, input_cols, num_res_filters=128,
        res_out_activation='linear', activation='relu', num_res_blocks=5):
    net = Graph()
    net.add_input('x', input_shape=(3, input_rows, input_cols))
    add_conv_block(net, 'in0', 'x', num_res_filters // 4, 9, activation=activation)
    add_conv_block(net, 'in1', 'in0', num_res_filters // 2, 3, subsample=(2, 2), activation=activation)
    add_conv_block(net, 'in2', 'in1', num_res_filters, 3, subsample=(2, 2), activation=activation)
    last_block_name = 'in2'
    for res_i in range(num_res_blocks):
        block_name = 'res_{}'.format(res_i)
        add_conv_block(net, block_name + '_in0', last_block_name, num_res_filters, 3, activation=activation)
        add_conv_block(net, block_name + '_in1', block_name + '_in0', num_res_filters, 3, activation='linear')
        net.add_node(Activation(res_out_activation), block_name, merge_mode='sum', inputs=[block_name + '_in1', last_block_name])
        last_block_name = block_name
    # theano doesn't seem to support fractionally-strided convolutions at the moment
    net.add_node(UpSampling2D(), 'out_up0', last_block_name)
    add_conv_block(net, 'out_0', 'out_up0', num_res_filters // 2, 3, activation=activation)
    net.add_node(UpSampling2D(), 'out_up1', 'out_0')
    add_conv_block(net, 'out_1', 'out_up1', num_res_filters // 4, 3, activation=activation)
    add_conv_block(net, 'out_2', 'out_1', 3, 9, activation='linear')
    net.add_node(Activation('linear'), 'texture_rgb', 'out_2', create_output=True)
    return net 
Developer: awentzonline, Project: keras-rtst, Lines: 24, Source file: base.py

Example 6: model_picture

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def model_picture(model, to_file='local/model.png'):

    graph = pydot.Dot(graph_type='digraph')
    if isinstance(model,Sequential):
        previous_node = None
        written_nodes = []
        n = 1
        for node in model.get_config()['layers']:
            # append number in case layers have same name to differentiate
            if (node['name'] + str(n)) in written_nodes:
                n += 1
            current_node = pydot.Node(node['name'] + str(n))
            written_nodes.append(node['name'] + str(n))
            graph.add_node(current_node)
            if previous_node:
                graph.add_edge(pydot.Edge(previous_node, current_node))
            previous_node = current_node
        graph.write_png(to_file)

    elif isinstance(model,Graph):
        # don't need to append number for names since all nodes labeled
        for input_node in model.input_config:
            graph.add_node(pydot.Node(input_node['name']))

        # intermediate and output nodes have input defined
        for layer_config in [model.node_config, model.output_config]:
            for node in layer_config:
                graph.add_node(pydot.Node(node['name']))
                # possible to have multiple 'inputs' vs 1 'input'
                if node['inputs']:
                    for e in node['inputs']:
                        graph.add_edge(pydot.Edge(e, node['name']))
                else:
                    graph.add_edge(pydot.Edge(node['input'], node['name']))

        graph.write_png(to_file) 
Developer: mateuszmalinowski, Project: visual_turing_test-tutorial, Lines: 38, Source file: model_visualization.py

Example 7: create_graph_model

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def create_graph_model():
    model = Graph()
    model.add_input(name='input')
    model.add_node(Dense(784, 50, activation='relu'), name='d1', input='input')
    model.add_node(Dense(50, 10, activation='softmax'), name='d2', input='d1')
    model.add_output(name='output', input='d2')
    return model 
Developer: lllcho, Project: CAPTCHA-breaking, Lines: 9, Source file: test_loss_weighting.py

Example 8: test_2o_1i_weights

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def test_2o_1i_weights(self):
        print('test a non-sequential graph with 1 input and 2 outputs')
        graph = Graph()
        graph.add_input(name='input1', ndim=2)

        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 1), name='dense3', input='dense1')

        graph.add_output(name='output1', input='dense2')
        graph.add_output(name='output2', input='dense3')
        graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})

        history = graph.fit({'input1': X_train, 'output1': y_train, 'output2': y2_train}, nb_epoch=10)
        out = graph.predict({'input1': X_test})
        assert(type(out) == dict)
        assert(len(out) == 2)
        loss = graph.test_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test})
        loss = graph.train_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test})
        loss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test})
        print(loss)
        assert(loss < 4.)

        print('test weight saving')
        graph.save_weights('temp.h5', overwrite=True)
        graph = Graph()
        graph.add_input(name='input1', ndim=2)
        graph.add_node(Dense(32, 16), name='dense1', input='input1')
        graph.add_node(Dense(32, 4), name='dense2', input='input1')
        graph.add_node(Dense(16, 1), name='dense3', input='dense1')
        graph.add_output(name='output1', input='dense2')
        graph.add_output(name='output2', input='dense3')
        graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
        graph.load_weights('temp.h5')
        nloss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test})
        print(nloss)
        assert(loss == nloss) 
Developer: lllcho, Project: CAPTCHA-breaking, Lines: 39, Source file: test_graph_model.py

Example 9: gen_model_brnn

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def gen_model_brnn(vocab_size=100, embedding_size=128, maxlen=100, output_size=6, hidden_layer_size=100, num_hidden_layers = 1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %\
            (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Graph model for Bidirectional RNN")
    model = Graph()
    model.add_input(name='input', input_shape=(maxlen,), dtype=int)
    logger.info("Added Input node")
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add_node(Embedding(vocab_size, embedding_size, input_length=maxlen), name='embedding', input='input')
    logger.info("Added Embedding node")
    model.add_node(Dropout(0.5), name="dropout_0", input="embedding")
    logger.info("Added Dropout Node")
    for i in xrange(num_hidden_layers):
        last_dropout_name = "dropout_%s" % i
        forward_name, backward_name, dropout_name = ["%s_%s" % (k, i + 1) for k in ["forward", "backward", "dropout"]]
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True), name=forward_name, input=last_dropout_name)
        logger.info("Added %s forward node[%s]" % (RNN_LAYER_TYPE, i+1))
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True, go_backwards=True), name=backward_name, input=last_dropout_name)
        logger.info("Added %s backward node[%s]" % (RNN_LAYER_TYPE, i+1))
        model.add_node(Dropout(0.5), name=dropout_name, inputs=[forward_name, backward_name])
        logger.info("Added Dropout node[%s]" % (i+1))
    model.add_node(TimeDistributedDense(output_size, activation="softmax"), name="tdd", input=dropout_name)
    logger.info("Added TimeDistributedDense node")
    model.add_output(name="output", input="tdd")
    logger.info("Added Output node")
    logger.info("Created model with following config:\n%s" % model.get_config())
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(optimizer, {"output": 'categorical_crossentropy'})
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model 
Developer: napsternxg, Project: DeepSequenceClassification, Lines: 37, Source file: model.py

Example 10: gen_model_brnn_multitask

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def gen_model_brnn_multitask(vocab_size=100, embedding_size=128, maxlen=100, output_size=[6, 96], hidden_layer_size=100, num_hidden_layers = 1, RNN_LAYER_TYPE="LSTM"):
    RNN_CLASS = LSTM
    if RNN_LAYER_TYPE == "GRU":
        RNN_CLASS = GRU
    logger.info("Parameters: vocab_size = %s, embedding_size = %s, maxlen = %s, output_size = %s, hidden_layer_size = %s, " %\
            (vocab_size, embedding_size, maxlen, output_size, hidden_layer_size))
    logger.info("Building Graph model for Bidirectional RNN")
    model = Graph()
    model.add_input(name='input', input_shape=(maxlen,), dtype=int)
    logger.info("Added Input node")
    logger.info("Init Model with vocab_size = %s, embedding_size = %s, maxlen = %s" % (vocab_size, embedding_size, maxlen))
    model.add_node(Embedding(vocab_size, embedding_size, input_length=maxlen, mask_zero=True), name='embedding', input='input')
    logger.info("Added Embedding node")
    model.add_node(Dropout(0.5), name="dropout_0", input="embedding")
    logger.info("Added Dropout Node")
    for i in xrange(num_hidden_layers):
        last_dropout_name = "dropout_%s" % i
        forward_name, backward_name, dropout_name = ["%s_%s" % (k, i + 1) for k in ["forward", "backward", "dropout"]]
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True), name=forward_name, input=last_dropout_name)
        logger.info("Added %s forward node[%s]" % (RNN_LAYER_TYPE, i+1))
        model.add_node(RNN_CLASS(output_dim=hidden_layer_size, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True, go_backwards=True), name=backward_name, input=last_dropout_name)
        logger.info("Added %s backward node[%s]" % (RNN_LAYER_TYPE, i+1))
        model.add_node(Dropout(0.5), name=dropout_name, inputs=[forward_name, backward_name])
        logger.info("Added Dropout node[%s]" % (i+1))
    output_names = []
    for i, output_task_size in enumerate(output_size):
        tdd_name, output_name = "tdd_%s" % i, "output_%s" % i
        model.add_node(TimeDistributedDense(output_task_size, activation="softmax"), name=tdd_name, input=dropout_name)
        logger.info("Added TimeDistributedDense node %s with output_size %s" % (i, output_task_size))
        model.add_output(name=output_name, input=tdd_name)
        output_names.append(output_name)
    logger.info("Added Output node")
    logger.info("Created model with following config:\n%s" % model.get_config())
    logger.info("Compiling model with optimizer %s" % optimizer)
    start_time = time.time()
    model.compile(optimizer, {k: 'categorical_crossentropy' for k in output_names})
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f seconds." % total_time)
    return model, output_names 
Developer: napsternxg, Project: DeepSequenceClassification, Lines: 41, Source file: model.py

Example 11: create_res_texture_net

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def create_res_texture_net(input_rows, input_cols, num_res_filters=128,
        res_out_activation='linear', activation='relu', num_res_blocks=5, depth=3):
    '''Adds a series of residual blocks at each resolution scale, rather than just
    the minimum one.
    '''
    net = Graph()
    net.add_input('x', input_shape=(3, input_rows, input_cols))
    add_conv_block(net, 'in0', 'x', num_res_filters // 4, 9, activation=activation)
    last_name = 'in0'
    # scale down input to max depth with a series of strided convolutions
    for scale_i in range(depth):
        num_scale_filters = num_res_filters - scale_i * 8 # // (2 ** scale_i) # (depth - scale_i - 1))
        scale_name = 'down_{}'.format(scale_i)
        add_conv_block(net, scale_name, last_name, num_scale_filters, 3, subsample=(2, 2), activation=activation)
        last_name = scale_name
    # add a series of residual blocks at each scale, from smallest to largest
    for scale_i in reversed(range(depth)):
        num_scale_filters = num_res_filters - scale_i * 8 # // (2 ** scale_i) # (depth - scale_i - 1))
        last_scale_name = last_name
        for res_i in range(num_res_blocks):
            block_name = 'res_{}_{}'.format(scale_i, res_i)
            add_conv_block(net, block_name + '_b0', last_name, num_res_filters, 3, activation=activation)
            add_conv_block(net, block_name + '_b1', block_name + '_b0', num_res_filters, 1, activation='linear')
            if last_name == last_scale_name:
                # transform the residual connection to the same number of filters
                add_conv_block(net, block_name + '_res', last_name, num_res_filters, 1, activation='linear')
            else:
                # no transform needed when the last node was part of the current residual block
                net.add_node(Layer(), block_name + '_res', last_name)
            net.add_node(Activation(res_out_activation), block_name, merge_mode='sum', inputs=[block_name + '_b1', block_name + '_res'])
            last_name = block_name
        # theano doesn't seem to support fractionally-strided convolutions at the moment
        up_name = 'up_{}'.format(scale_i)
        net.add_node(UpSampling2D(), up_name, last_name)
        last_name = up_name
        last_scale_name = up_name
    # final output
    add_conv_block(net, 'out', last_name, 3, 9, activation='linear')
    net.add_node(Activation('linear'), 'texture_rgb', 'out', create_output=True)
    return net 
Developer: awentzonline, Project: keras-rtst, Lines: 42, Source file: girthy.py

Example 12: comp_two_path

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def comp_two_path(self):
        '''
        compiles two-path model, takes in a 4x33x33 patch and assesses global and local paths, then merges the results.
        '''
        print 'Compiling two-path model...'
        model = Graph()
        model.add_input(name='input', input_shape=(self.n_chan, 33, 33))

        # local pathway, first convolution/pooling
        model.add_node(Convolution2D(64, 7, 7, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='local_c1', input= 'input')
        model.add_node(MaxPooling2D(pool_size=(4,4), strides=(1,1), border_mode='valid'), name='local_p1', input='local_c1')

        # local pathway, second convolution/pooling
        model.add_node(Dropout(0.5), name='drop_lp1', input='local_p1')
        model.add_node(Convolution2D(64, 3, 3, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='local_c2', input='drop_lp1')
        model.add_node(MaxPooling2D(pool_size=(2,2), strides=(1,1), border_mode='valid'), name='local_p2', input='local_c2')

        # global pathway
        model.add_node(Convolution2D(160, 13, 13, border_mode='valid', activation='relu', W_regularizer=l1l2(l1=0.01, l2=0.01)), name='global', input='input')

        # merge local and global pathways
        model.add_node(Dropout(0.5), name='drop_lp2', input='local_p2')
        model.add_node(Dropout(0.5), name='drop_g', input='global')
        model.add_node(Convolution2D(5, 21, 21, border_mode='valid', activation='relu',  W_regularizer=l1l2(l1=0.01, l2=0.01)), name='merge', inputs=['drop_lp2', 'drop_g'], merge_mode='concat', concat_axis=1)

        # Flatten output of 5x1x1 to 1x5, perform softmax
        model.add_node(Flatten(), name='flatten', input='merge')
        model.add_node(Dense(5, activation='softmax'), name='dense_output', input='flatten')
        model.add_output(name='output', input='dense_output')

        sgd = SGD(lr=0.005, decay=0.1, momentum=0.9)
        model.compile(sgd, loss={'output': 'categorical_crossentropy'})
        print 'Done.'
        return model 
Developer: naldeborgh7575, Project: brain_segmentation, Lines: 36, Source file: Segmentation_Models.py

Example 13: get_model

# Required module: from keras import models [as alias]
# Or: from keras.models import Graph [as alias]
def get_model(inputdim, outputdim, regularization_strength=0.01, lr=0.000, cosine=False, **kwargs):
    transformation = Dense(inputdim, init='identity',
                           W_constraint=Orthogonal())

    model = Graph()
    model.add_input(name='embeddings1', input_shape=(inputdim,))
    model.add_input(name='embeddings2', input_shape=(inputdim,))
    model.add_shared_node(transformation, name='transformation',
                          inputs=['embeddings1', 'embeddings2'],
                          outputs=['transformed1', 'transformed2'])
    model.add_node(Lambda(lambda x: x[:, :outputdim]), input='transformed1', name='projected1')
    model.add_node(Lambda(lambda x: -x[:, :outputdim]), input='transformed2', name='negprojected2')

    if cosine:
        model.add_node(Lambda(lambda x:  x / K.reshape(K.sqrt(K.sum(x * x, axis=1)), (x.shape[0], 1))),
                       name='normalized1', input='projected1')
        model.add_node(Lambda(lambda x:  x / K.reshape(K.sqrt(K.sum(x * x, axis=1)), (x.shape[0], 1))),
                       name='negnormalized2', input='negprojected2')
        model.add_node(Lambda(lambda x: K.reshape(K.sum(x, axis=1), (x.shape[0], 1))),
                       name='distances', inputs=['normalized1', 'negnormalized2'], merge_mode='mul')
    else:
        model.add_node(Lambda(lambda x: K.reshape(K.sqrt(K.sum(x * x, axis=1)), (x.shape[0], 1))),
                       name='distances', inputs=['projected1', 'negprojected2'], merge_mode='sum')

    model.add_output(name='y', input='distances')
    model.compile(loss={'y': lambda y, d: K.mean(y * d)}, optimizer=SimpleSGD())
    return model 
Developer: williamleif, Project: socialsent, Lines: 29, Source file: embedding_transformer.py


Note: The keras.models.Graph examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please refer to each project's license before redistributing or using the code; do not reproduce without permission.