

Python backend.learning_phase method: code examples

This article collects typical usage examples of the keras.backend.learning_phase method in Python. If you are wondering what backend.learning_phase does, how to call it, or where to find examples of it in use, the curated code samples below may help. You can also explore more usage examples from its parent module, keras.backend.


Below are 15 code examples of the backend.learning_phase method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
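
Before the individual examples, here is a minimal, self-contained sketch of the common pattern they all share (assuming the TF1-era Keras API used throughout this page; the model and variable names are illustrative only, not taken from any of the projects below): K.learning_phase() is added as an extra input to a backend function, and callers then feed 0 for test mode or 1 for training mode.

import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout

# A small model whose behaviour depends on the learning phase (because of Dropout)
model = Sequential([Dense(32, activation='relu', input_shape=(10,)),
                    Dropout(0.5),
                    Dense(3, activation='softmax')])

# Build a backend function that takes the learning phase as an extra input
predict_fn = K.function([model.input, K.learning_phase()], [model.output])

x_batch = np.random.rand(4, 10).astype('float32')
test_out = predict_fn([x_batch, 0])[0]   # 0 -> test mode: dropout disabled
train_out = predict_fn([x_batch, 1])[0]  # 1 -> train mode: dropout active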

Example 1: get_deep_representations

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def get_deep_representations(model, X, batch_size=256):
    """
    TODO
    :param model:
    :param X:
    :param batch_size:
    :return:
    """
    # last hidden layer is always at index -4
    output_dim = model.layers[-4].output.shape[-1].value
    get_encoding = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[-4].output]
    )

    n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
    output = np.zeros(shape=(len(X), output_dim))
    for i in range(n_batches):
        # the trailing 0 feeds learning_phase=0, i.e. test mode (dropout/batch norm behave as at inference)
        output[i * batch_size:(i + 1) * batch_size] = \
            get_encoding([X[i * batch_size:(i + 1) * batch_size], 0])[0]

    return output 
Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 24, Source file: util.py

Example 2: one_shot_method

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def one_shot_method(prediction, x, curr_sample, curr_target, p_t):
    grad_est = np.zeros((BATCH_SIZE, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))
    DELTA = np.random.randint(2, size=(BATCH_SIZE, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))
    np.place(DELTA, DELTA==0, -1)

    y_plus = np.clip(curr_sample + args.delta * DELTA, CLIP_MIN, CLIP_MAX)
    y_minus = np.clip(curr_sample - args.delta * DELTA, CLIP_MIN, CLIP_MAX)

    if args.CW_loss == 0:
        pred_plus = K.get_session().run([prediction], feed_dict={x: y_plus, K.learning_phase(): 0})[0]
        pred_plus_t = pred_plus[np.arange(BATCH_SIZE), list(curr_target)]

        pred_minus = K.get_session().run([prediction], feed_dict={x: y_minus, K.learning_phase(): 0})[0]
        pred_minus_t = pred_minus[np.arange(BATCH_SIZE), list(curr_target)]

        num_est = (pred_plus_t - pred_minus_t)

    grad_est = num_est[:, None, None, None]/(args.delta * DELTA)

    # Getting gradient of the loss
    if args.CW_loss == 0:
        loss_grad = -1.0 * grad_est/p_t[:, None, None, None]

    return loss_grad 
Developer: sunblaze-ucb, Project: blackbox-attacks, Lines of code: 26, Source file: cifar10_query_based.py

Example 3: image_detection

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def image_detection(sess, image_path, image_file, colors):
    # Preprocess your image
    image, image_data = preprocess_image(image_path + image_file, model_image_size = (416, 416))
    
    # Run the session on the output tensors, feeding the image data and
    # K.learning_phase(): 0 so the model runs in inference mode
    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={yolo_model.input: image_data, K.learning_phase(): 0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    
    # Draw bounding boxes on the image file
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the predicted bounding box on the image
    #image.save(os.path.join("out", image_file), quality=90)
    cv2.imwrite(os.path.join("out", "tiny_yolo_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])
    
    return out_scores, out_boxes, out_classes 
Developer: kaka-lin, Project: object-detection, Lines of code: 21, Source file: test_tiny_yolo.py

Example 4: image_detection

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def image_detection(sess, image_path, image_file, colors):
    # Preprocess your image
    image, image_data = preprocess_image(image_path + image_file, model_image_size = (416, 416))
    
    # Run the session on the output tensors, feeding the image data and
    # K.learning_phase(): 0 so the model runs in inference mode
    out_scores, out_boxes, out_classes = sess.run(
        [scores, boxes, classes],
        feed_dict={yolov3.input: image_data, K.learning_phase(): 0})

    # Print predictions info
    print('Found {} boxes for {}'.format(len(out_boxes), image_file))
    
    # Draw bounding boxes on the image file
    image = draw_boxes(image, out_scores, out_boxes, out_classes, class_names, colors)

    # Save the predicted bounding box on the image
    #image.save(os.path.join("out", image_file), quality=90)
    cv2.imwrite(os.path.join("out", "yolov3_" + image_file), image, [cv2.IMWRITE_JPEG_QUALITY, 90])
    
    return out_scores, out_boxes, out_classes 
Developer: kaka-lin, Project: object-detection, Lines of code: 21, Source file: test_yolov3.py

Example 5: get_feature_map_4

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def get_feature_map_4(model, im):
    im = im.astype(np.float32)
    dim_ordering = K.image_dim_ordering()
    if dim_ordering == 'th':
        # 'RGB'->'BGR'
        im = im[::-1, :, :]
        # Zero-center by mean pixel
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        im = im[:, :, ::-1]
        # Zero-center by mean pixel
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)
    inputs = [K.learning_phase()] + model.inputs
    _convout1_f = K.function(inputs, [model.layers[23].output])
    feature_map = _convout1_f([0] + [im])
    feature_map = np.array([feature_map])
    feature_map = feature_map[0, 0, 0, :, :, :]
    return feature_map 
Developer: imatge-upc, Project: detection-2016-nipsws, Lines of code: 27, Source file: features.py

Example 6: get_image_descriptor_for_image

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def get_image_descriptor_for_image(image, model):
    im = cv2.resize(image, (224, 224)).astype(np.float32)
    dim_ordering = K.image_dim_ordering()
    if dim_ordering == 'th':
        # 'RGB'->'BGR'
        im = im[::-1, :, :]
        # Zero-center by mean pixel
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        im = im[:, :, ::-1]
        # Zero-center by mean pixel
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)
    inputs = [K.learning_phase()] + model.inputs
    _convout1_f = K.function(inputs, [model.layers[33].output])
    return _convout1_f([0] + [im]) 
Developer: imatge-upc, Project: detection-2016-nipsws, Lines of code: 24, Source file: features.py

Example 7: get_conv_image_descriptor_for_image

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def get_conv_image_descriptor_for_image(image, model):
    im = cv2.resize(image, (224, 224)).astype(np.float32)
    dim_ordering = K.image_dim_ordering()
    if dim_ordering == 'th':
        # 'RGB'->'BGR'
        im = im[::-1, :, :]
        # Zero-center by mean pixel
        im[0, :, :] -= 103.939
        im[1, :, :] -= 116.779
        im[2, :, :] -= 123.68
    else:
        # 'RGB'->'BGR'
        im = im[:, :, ::-1]
        # Zero-center by mean pixel
        im[:, :, 0] -= 103.939
        im[:, :, 1] -= 116.779
        im[:, :, 2] -= 123.68
    im = im.transpose((2, 0, 1))
    im = np.expand_dims(im, axis=0)
    inputs = [K.learning_phase()] + model.inputs
    _convout1_f = K.function(inputs, [model.layers[31].output])
    return _convout1_f([0] + [im]) 
Developer: imatge-upc, Project: detection-2016-nipsws, Lines of code: 24, Source file: features.py

Example 8: get_mc_predictions

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def get_mc_predictions(model, X, nb_iter=50, batch_size=256):
    """
    TODO
    :param model:
    :param X:
    :param nb_iter:
    :param batch_size:
    :return:
    """
    output_dim = model.layers[-1].output.shape[-1].value
    get_output = K.function(
        [model.layers[0].input, K.learning_phase()],
        [model.layers[-1].output]
    )

    def predict():
        n_batches = int(np.ceil(X.shape[0] / float(batch_size)))
        output = np.zeros(shape=(len(X), output_dim))
        for i in range(n_batches):
            output[i * batch_size:(i + 1) * batch_size] = \
                get_output([X[i * batch_size:(i + 1) * batch_size], 1])[0]
        return output

    preds_mc = []
    for i in tqdm(range(nb_iter)):
        preds_mc.append(predict())

    return np.asarray(preds_mc) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines of code: 30, Source file: util.py
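
A hypothetical usage note (the names model and X_test below are assumptions, not part of the original project): the stack of stochastic predictions returned by get_mc_predictions can be reduced to a mean prediction and a simple per-sample uncertainty estimate.

preds_mc = get_mc_predictions(model, X_test, nb_iter=50)  # shape (nb_iter, n_samples, n_classes)
mean_pred = preds_mc.mean(axis=0)                # average prediction over the stochastic passes
uncertainty = preds_mc.std(axis=0).mean(axis=1)  # spread across passes as an uncertainty score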

Example 9: _get_learning_phase

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def _get_learning_phase(self):
        if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
            return [K.learning_phase()]
        else:
            return [] 
Developer: codekansas, Project: gandlf, Lines of code: 7, Source file: models.py

Example 10: get_activations

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def get_activations(model, layer_idx, X_batch):
    get_activations = K.function([model.layers[0].input, K.learning_phase()], [model.layers[layer_idx].output,])
    activations = get_activations([X_batch,0])
    return activations 
Developer: sergiooramas, Project: tartarus, Lines of code: 6, Source file: predict.py

Example 11: loss

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def loss(X):
    X = X.reshape((1, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
    confidence = K.get_session().run([prediction], feed_dict={x: X, K.learning_phase(): 0})[0]
    # confidence[:,curr_target] = 1e-4
    max_conf_i = np.argmax(confidence, 1)
    max_conf = np.max(confidence, 1)[0]
    if max_conf_i == curr_target:
        return max_conf
    elif max_conf_i != curr_target:
        return -1.0 * max_conf 
Developer: sunblaze-ucb, Project: blackbox-attacks, Lines of code: 12, Source file: particle_swarm_attack.py

Example 12: logit_loss

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def logit_loss(X):
    X = X.reshape((1, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
    confidence = K.get_session().run([prediction], feed_dict={x: X, K.learning_phase(): 0})[0]
    # confidence[:,curr_target] = 1e-4
    logits = np.log(confidence)

    logit_t = logits[:, curr_target]
    logits[:, curr_target] = 1e-4
    max_logit_i = np.argmax(logits, 1)
    logit_max = logits[:, max_logit_i]
    return logit_t - logit_max 
Developer: sunblaze-ucb, Project: blackbox-attacks, Lines of code: 13, Source file: particle_swarm_attack.py

Example 13: visualize_attention

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def visualize_attention(test_seq,
    model,
    id2wrd,
    n):
    """
    Visualize the top n words that the model pays attention to. 
    We first do a forward pass and get the output of the LSTM layer.
    THen we apply the function of the Attention layer and get the weights.
    Finally we obtain and print the words of the input sequence 
    that have these weights.


    """

    get_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[4].output])
    out = get_layer_output([test_seq, ])[0]  # test mode

    att_w = model.layers[5].get_weights()

    eij = np.tanh(np.dot(out[0], att_w[0]))
    ai = np.exp(eij)
    weights = ai/np.sum(ai)
    weights = np.sum(weights,axis=1)

    topKeys = np.argpartition(weights,-n)[-n:]

    print(' '.join([id2wrd[wrd_id] for wrd_id in test_seq[0] if wrd_id != 0.]))

    for k in test_seq[0][topKeys]:
        if k != 0.:
            print(id2wrd[k])
    
    return 
Developer: AlexGidiotis, Project: Document-Classifier-LSTM, Lines of code: 35, Source file: utils.py

Example 14: __init__

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def __init__(self, num_rows, num_cols, weights_path='vgg16_weights.h5',
            pool_mode='avg', last_layer='conv5_1', learning_phase=None):
        self.learning_phase = learning_phase
        self.last_layer = last_layer
        self.net = get_model(num_rows, num_cols, weights_path=weights_path,
            pool_mode=pool_mode, last_layer=last_layer)
        self.net_input = self.net.get_layer('vgg_input')
        self._f_layer_outputs = {} 
Developer: awentzonline, Project: keras-vgg-buddy, Lines of code: 10, Source file: models.py

Example 15: get_f_layer

# Required import: from keras import backend [as an alias]
# Or: from keras.backend import learning_phase [as an alias]
def get_f_layer(self, layer_name):
        '''Create a function for the response of a layer.'''
        inputs = [self.net_input]
        if self.learning_phase is not None:
            inputs.append(K.learning_phase())
        return K.function(inputs, [self.get_layer_output(layer_name)]) 
Developer: awentzonline, Project: keras-vgg-buddy, Lines of code: 8, Source file: models.py


Note: The keras.backend.learning_phase examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. For distribution and use, please follow the license of the corresponding project. Do not reproduce without permission.