

Python Sequential.get_input Method Code Examples

This article collects typical usage examples of the Python method keras.models.Sequential.get_input. If you are unsure what Sequential.get_input does, how to call it, or where it is used, the curated code examples below should help. You can also read further about keras.models.Sequential itself for related usage examples.


The following presents 11 code examples of Sequential.get_input, listed in order of popularity.
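
Before the examples, here is a minimal, self-contained sketch of the pattern most of them share: obtain the model's symbolic input with get_input, its symbolic output with get_output, and compile both into a theano.function. This sketch assumes the legacy Keras 0.x API with the Theano backend used throughout these examples; the layer sizes and data below are illustrative only.

import numpy as np
import theano
from keras.models import Sequential
from keras.layers.core import Dense, Activation

model = Sequential()
model.add(Dense(32, input_dim=64))
model.add(Activation('relu'))

# get_input(train=False) returns the model's symbolic input tensor; together
# with get_output(train=False) it can be compiled into a Theano function that
# evaluates the network (or any intermediate layer) on concrete arrays.
forward = theano.function([model.get_input(train=False)],
                          model.get_output(train=False),
                          allow_input_downcast=True)

x = np.random.uniform(size=(4, 64)).astype('float32')
print(forward(x).shape)  # (4, 32)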

Example 1: test_mask_loss_network

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
def test_mask_loss_network():
    model = Sequential()
    model.add(Dense(16*16, input_dim=16*16))
    model.add(Reshape((1, 16, 16)))
    net_out = model.get_output()

    net_in = model.get_input()
    th_mask = T.tensor4()
    loss = mask_loss(th_mask, net_out)['loss']
    updates = Adam().get_updates(model.params, model.constraints, loss)
    train_fn = theano.function([th_mask, net_in], [loss], updates=updates)

    nb_batches = 32
    mask_idx = next(masks(64*nb_batches, scales=[0.25]))
    z = np.random.uniform(low=-1, high=1, size=mask_idx.shape).reshape((-1, 16*16)).astype(np.float32)
    first_loss = 0

    epochs = 30
    nb_batches = 10
    for i, mask_idx in enumerate(itertools.islice(masks(64*nb_batches, scales=[0.25]), epochs)):
        z = np.random.uniform(low=-1, high=1, size=mask_idx.shape
                              ).reshape((-1, 16*16)).astype(np.float32)
        loss = train_fn(mask_idx, z)
        # print(loss)
        if i == 0:
            first_loss = loss

    assert first_loss > loss
Developer: GALI472, Project: deepdecoder, Lines: 30, Source: test_gpu_only_mask_loss.py

Example 2: __init__

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
      def __init__(self):
          left = Sequential()
          left.add(Dense(784, 50))
          left.add(Activation('relu'))
          
          model = Sequential()
          model.add(Merge([left, left], mode='sum'))
          
          model.add(Dense(50, 10))
          model.add(Activation('softmax'))
          pdb.set_trace()

          model = Sequential()

          left = Sequential()
          num_kernel = 32
          l1_penalty = 0.0001
          b_mode = 'full'
          left.add(Convolution2D(num_kernel, 3, 2, 2,  W_regularizer=l1(l1_penalty), border_mode=b_mode))
          left.add(Convolution2D(num_kernel, num_kernel, 2, 2, W_regularizer=l1(l1_penalty), border_mode=b_mode))
          left.add(LeakyReLU(0.1))
          #left.add(Activation('relu'))
          left.add(MaxPooling2D(poolsize=(2, 2)))
          #left.add(Convolution2D(num_kernel, 3, 2, 2,  W_regularizer=l1(l1_penalty), border_mode=b_mode))
          #left.add(Convolution2D(num_kernel, num_kernel, 2, 2, W_regularizer=l1(l1_penalty), border_mode=b_mode))
          #left.add(LeakyReLU(0.1))
          ##left.add(Activation('relu'))
          #left.add(MaxPooling2D(poolsize=(2, 2)))

          model.add(Merge([left, left], mode='sum'))
          pdb.set_trace()
          self.f = theano.function(model.get_input(), model.get_output())
Developer: yangli625, Project: ReId_theano, Lines: 34, Source: myModel1.py

Example 3: TestOrthoRNN

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
class TestOrthoRNN(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestOrthoRNN, self).__init__(*args, **kwargs)
        self.input_dim = 2
        self.state_dim = 2
        self.model = Recursive(return_sequences=True)
        self.model.add_input('input', ndim=3)  # Input is 3D tensor
        self.model.add_state('h', dim=self.state_dim)
        self.model.add_node(Dense(self.input_dim, self.state_dim,
                                  init='one'), name='i2h',
                            inputs=['input', ])
        self.model.add_node(Dense(self.state_dim, self.state_dim,
                                  init='orthogonal'), name='h2h',
                            inputs=['h', ])
        self.model.add_node(Lambda(lambda x: x), name='rec',
                            inputs=['i2h', 'h2h'], merge_mode='sum',
                            return_state='h',
                            create_output=True)

        self.model2 = Sequential()
        self.model2.add(SimpleRNN(input_dim=self.input_dim, activation='linear',
                                  inner_init='one',
                                  output_dim=self.state_dim, init='one',
                                  return_sequences=True))
        U = self.model.nodes['h2h'].W.get_value()
        self.model2.layers[0].U.set_value(U)

    def test_step(self):
        XX = T.matrix()
        HH = T.matrix()
        A = self.model._step(XX, HH)
        F = function([XX, HH], A, on_unused_input='warn')
        x = np.ones((1, 2))
        h = np.ones((1, 2))
        y = F(x, h)
        assert(y[-1].shape == (1, 2))

    def test_get_get_output(self):
        X = self.model.get_input()
        Y = self.model._get_output()
        F = function([X], Y, allow_input_downcast=True)

        x = np.ones((3, 5, self.input_dim))
        y = F(x)
        print y

        X2 = self.model2.get_input()
        Y2 = self.model2.get_output()
        F2 = function([X2], Y2)
        y2 = F2(x)

        assert_allclose(y2, y[-1])
Developer: mattsqerror, Project: seya, Lines: 54, Source: test_containers.py

Example 4: TestRecursive

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
class TestRecursive(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(TestRecursive, self).__init__(*args, **kwargs)
        self.input_dim = 2
        self.state_dim = 2
        self.model = Recursive(return_sequences=True)
        self.model.add_input('input', ndim=3)  # Input is 3D tensor
        self.model.add_state('h', dim=self.state_dim)
        self.model.add_node(Dense(self.input_dim + self.state_dim, self.state_dim,
                                  init='one'), name='rec',
                            inputs=['input', 'h'],
                            return_state='h')
        self.model.add_node(Activation('linear'), name='out', input='rec',
                            create_output=True)

        self.model2 = Sequential()
        self.model2.add(SimpleRNN(input_dim=self.input_dim, activation='linear',
                                  inner_init='one',
                                  output_dim=self.state_dim, init='one',
                                  return_sequences=True))

    def test_step(self):
        XX = T.matrix()
        HH = T.matrix()
        A = self.model._step(XX, HH)
        F = function([XX, HH], A, allow_input_downcast=True)
        x = np.ones((1, 2))
        h = np.ones((1, 2))
        y = F(x, h)
        r = np.asarray([[4., 4.]])
        assert_allclose([r, r], y)

    def test_get_get_output(self):
        X = self.model.get_input()
        Y = self.model._get_output()
        F = function([X], Y, allow_input_downcast=True)

        x = np.ones((3, 5, self.input_dim)).astype(floatX)
        y = F(x)
        print y

        X2 = self.model2.get_input()
        Y2 = self.model2.get_output()
        F2 = function([X2], Y2)
        y2 = F2(x)

        assert_allclose(y2, y[1])
Developer: berleon, Project: seya, Lines: 49, Source: test_containers.py

Example 5: BatchNormalization

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
vc = BatchNormalization()
model.add(vc)
model.add(Flatten())
#model.add(Dense(nb_classes))
model.add(Activation('softmax'))


model.load_weights("/data/lisatmp4/sarath/data/output/conv/1/weights.hdf5")#/data/lisatmp4/chinna/data/ift6268/temp/1/weights.hdf5")

# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)



convout = theano.function([model.get_input(train=False)], vc.get_output(train=False))
t0 = time.clock()
[layer_output] = convout(im)
print(layer_output.shape)


dpath = "/data/lisatmp4/chinna/data/ift6268/temp/1/"

for i in range(0,10):
	convert_to_image(layer_output[i],dpath+str(i)+"old.jpg")
	layer_output[i] = add_gnoise_util(layer_output[i])
	print(max(layer_output[i].flatten()))
	convert_to_image(layer_output[i],dpath+str(i)+".jpg")	
print ("Time")
print (time.clock() - t0)
Developer: chinnadhurai, Project: machine_vision_course, Lines: 32, Source: q2.py

Example 6: SGD

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))

sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)

model.load_weights(model_fname)

# from keras.utils.visualize_util import plot
# plot(model, to_file='../other/figures/cnn_model.png')
# exit()

convout1_f = theano.function([model.get_input(train=False)], convout1.get_output(train=False))
convout2_f = theano.function([model.get_input(train=False)], convout2.get_output(train=False))

# Convolution layer 1 weights
W = model.layers[0].W.get_value(borrow=True)
W = np.squeeze(W)
print("W shape : ", W.shape)

pl.figure(figsize=(15, 15))
pl.title('conv1 weights')
nice_imshow(pl.gca(), make_mosaic(W, 6, 6), cmap=cmap)
pl.savefig('../other/figures/cnn_weights.png',bbox_inches='tight', dpi=200)
pl.show()


# Visualize convolution 1 result (after activation)
Developer: naveensr89, Project: chars74k_nsr, Lines: 33, Source: cnn_visualize.py

Example 7: get_sample

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
    I, V, sw = get_sample(n_bits=input_dim, max_size=20, min_size=1, batch_size=100)

    loss1 = model.train_on_batch(I, V, sample_weight=sw)
    loss2 = lstm.train_on_batch(I, V, sample_weight=sw)

    progbar.add(1, values=[("NTM", loss1), ("LSTM", loss2)])

    if e % 500 == 0:
        print("")
        acc1 = test_model(model, 'ntm.png')
        acc2 = test_model(lstm, 'lstm.png')
        print("NTM  test acc: {}".format(acc1))
        print("LSTM test acc: {}".format(acc2))

##### VISUALIZATION #####
X = model.get_input()
Y = ntm.get_full_output()[0:3]  # (memory over time, read_vectors, write_vectors)
F = function([X], Y, allow_input_downcast=True)

inp, out, sw = get_sample(1, 8, 21, 20)
mem, read, write = F(inp.astype('float32'))
Y = model.predict(inp)

plt.figure(figsize=(15, 12))

plt.subplot(221)
plt.imshow(write[0])
plt.xlabel('memory location')
plt.ylabel('time')
plt.title('write')
Developer: amitbeka, Project: keras, Lines: 32, Source: neural_turing_machine_copy.py

Example 8: Sequential

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
detector.add(Dropout(.3))
detector.add(Dense(1)) # 1: Yes, it belongs to S, 0: fake!
detector.add(Activation('sigmoid'))

# Fully Connected model

sampler = Sequential()
sampler.add(Dense(dim, input_dim=dim))
sampler.add(lrelu())
sampler.add(Dense(dim))
sampler.add(lrelu())
sampler.add(Dense(mnist_dim))
sampler.add(Activation('sigmoid'))

# This is G itself!!!
sample_fake = theano.function([sampler.get_input()], sampler.get_output())

# We add the detector G on top, but it won't be adapted with this cost function.
# But here is a dirty hack: Theano shared variables on the GPU are the same for
# `detector` and `detector_no_grad`, so, when we adapt `detector` the values of
# `detector_no_grad` will be updated as well. But this only happens following the
# correct gradients.
# Don't you love pointers? Aliasing can be our friend sometimes.
detector.trainable = False
sampler.add(detector)

opt_g = Adam(lr=.001) # I got better results when
                      # detector's learning rate is faster
sampler.compile(loss='binary_crossentropy', optimizer=opt_g)

# debug
Developer: nPellejero, Project: deepNet, Lines: 33, Source: gan.py
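
The comments in this example rest on Theano's shared-variable aliasing: the detector's weights are Theano shared variables, so the copy of the detector stacked inside the sampler sees every update made while the detector itself is trained. Below is a small, Keras-free sketch of that behaviour; the variable names and shapes are illustrative, not taken from the project above.

import numpy as np
import theano
import theano.tensor as T

# A single shared variable referenced from two compiled functions.
W = theano.shared(np.ones((2, 2), dtype='float32'), name='W')
x = T.matrix('x')

read_W = theano.function([x], T.dot(x, W))                        # uses W
zero_W = theano.function([], [], updates=[(W, T.zeros_like(W))])  # updates W in place

zero_W()
print(read_W(np.ones((1, 2), dtype='float32')))  # [[0. 0.]] -- both functions see the update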

Example 9: create_input

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
x = create_input(sentence)

# build the model: 2 stacked LSTM
print("Build model...")
model = Sequential()
first_layer = LSTM(512, return_sequences=True, input_shape=(None, len(chars)))
model.add(first_layer)
model.add(Dropout(0.5))
second_layer = LSTM(512, return_sequences=True)
model.add(second_layer)
model.add(Dropout(0.5))
model.add(TimeDistributedDense(len(chars)))
model.add(Activation("softmax"))

print("creating function")
layer_output = theano.function([model.get_input(train=False)], second_layer.get_output(train=False))

W = layer_output(x)[0]
print(W.shape)

dists = []
for i in xrange(W.shape[0]):
    for j in xrange(i + 1, W.shape[0]):
        # m = (W[i] + W[j]) / 2
        # d = sum([cosine(W[k], m) for k in xrange(i, j)])
        d = euclidean(W[i], W[j])
        dists.append((d, i, j))

dists.sort()
for d, i, j in dists[:100]:
    print(sentence, i, j, d)
Developer: pombredanne, Project: rnn-lang-model, Lines: 33, Source: summarize_text.py

Example 10: print

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]
# build a loss function that maximizes the activation
# of the nth filter of the layer considered
layer_output = layer_dict[layer_name].get_output()
print("YODA Layer-output shape",layer_output.shape)
loss = K.mean(layer_output[:,filter_index])

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch)



input_img_data = [X_train[0,:,:,:]]
sp.misc.imsave('test.jpg',input_img_data)



input_img = model.get_input() 
score = model.evaluate(X_test, Y_test, batch_size=batch_size)
grads = K.gradients(loss,input_img)
iterate = K.function([input_img], [loss, grads])


print("YODA_1")
step = 0.01
for i in range(10):
   loss_value, grads_value = iterate([input_img_data])
   input_img_data += grads_value*step
score = model.predict_stochastic(input_img_data,batch_size=batch_size)
print(score)
print("YODA")
json_string = model.to_json()
open('model_200_arch.json', 'w').write(json_string)
Developer: Riashat, Project: Active-Learning-Bayesian-Convolutional-Neural-Networks, Lines: 33, Source: training.py

Example 11: diffraction

# Required import: from keras.models import Sequential [as alias]
# Or: from keras.models.Sequential import get_input [as alias]

#......... part of the code is omitted here .........
    print('Predicting')
    start = time.clock()
    predicted_output = model.predict(X_test, batch_size=batch_size)
    print('The prediction time for 2000 samples is:',time.clock()-start)
    np.save('labels',Y_test)
    np.save('predicted_output',predicted_output)
    print('Predicted class', predicted_output)
    i = 1
    margin = 5
    n = 15
    # Visualize the first layer of convolutions on an input image
    X = X_test[i:i+1]
    img =X_test[0,:,:,:]
    img_width = X.shape[2]
    img_height = X.shape[3]
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin

    stitched_filters=np.zeros((1,width,height))
    for i in range(n):
        for j in range(n):
            img =X_test[n,:,:,:]
            stitched_filters[:, (img_width + margin) * i: (img_width + margin)	* i + img_width, (img_height + margin) * j:(img_height + margin) * j + img_height] = img

    fb = np.zeros((width,height))
    fb = stitched_filters[0]
    imsave('conv.png',fb )



    # Visualize weights
    W = model.layers[0].W.get_value(borrow=True)
    W = np.squeeze(W)
    print("W shape : ", W.shape[0], W.shape[1:])
    n = 6
    img_width = W.shape[1]
    img_height = W.shape[2]
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin

    stitched_filters=np.zeros((1,width,height))
    for i in range(n):
        for j in range(n):
            index=i*n+j
            if index < W.shape[0]:
                img =W[j]
                stitched_filters[:, (img_width + margin) * i: (img_width + margin)  * i + img_width, (img_height + margin) * j:(img_height + margin) * j + img_height] = img

    fb = np.zeros((width,height))
    fb = stitched_filters[0]
    imsave('weight.png',fb )





    # Visualize convolution result (after activation)
    convout1_f = theano.function([model.get_input(train=False)], convout1.get_output(train=False))
    W = convout1_f(X)
    W = np.squeeze(W)
    print("C1 shape : ", W.shape)

    n=6
    img_width = W.shape[1]
    img_height = W.shape[2]
    width = n * img_width + (n - 1) * margin
    height = n * img_height + (n - 1) * margin

    stitched_filters=np.zeros((1,width,height))
    for i in range(n):
        for j in range(n):
            index=i*n+j
            if index < W.shape[0]:
                #			print("index is", index)
                img =W[j]
                stitched_filters[:, (img_width + margin) * i: (img_width + margin)  * i + img_width, (img_height + margin) * j:(img_height + margin) * j + img_height] = img
    ff = np.zeros((width,height))
    ff = stitched_filters[0]
    plt.imshow(ff)
    plt.show()
    imsave('conf1.png',ff )

    print('Ploting Results')
    Y_predicted = np.zeros(len(predicted_output))
    for i in range(len(predicted_output)):
        if np.round(predicted_output[i,0]) ==1:
           Y_predicted[i] = 0
        else:
           Y_predicted[i] = 1
           
    xxx = range(len(Y_test))
    plt.subplot(2, 1, 1)
    plt.scatter(xxx,Y_test)
    plt.title('Expected')
    plt.ylim((-0.2, 1.2))
    plt.subplot(2, 1, 2)
    plt.scatter(xxx,Y_predicted)
    plt.title('Predicted')
    plt.ylim((-0.2, 1.2))
    plt.show()
Developer: xifengbishu, Project: KERAS2, Lines: 104, Source: all.py


Note: The keras.models.Sequential.get_input examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.