当前位置: 首页>>代码示例>>Python>>正文


Python Layer.one_filter_out方法代码示例

本文整理汇总了Python中layer.Layer.one_filter_out方法的典型用法代码示例。如果您正苦于以下问题：Python Layer.one_filter_out方法的具体用法？Python Layer.one_filter_out怎么用？Python Layer.one_filter_out使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在layer.Layer的用法示例。


在下文中一共展示了Layer.one_filter_out方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: main

# 需要导入模块: from layer import Layer [as 别名]
# 或者: from layer.Layer import one_filter_out [as 别名]
def main(argv=None):
    """Build and train the 8-layer convolutional policy network.

    Constructs a stack of 3x3 convolution layers over board positions,
    trains it with SGD under an exponentially decaying learning rate,
    and writes a checkpoint when training finishes.

    NOTE(review): depends on module-level globals not visible in this
    file excerpt (BATCH_SIZE, INPUT_WIDTH, INPUT_DEPTH, BOARD_SIZE,
    train_step, num_epochs, nEpoch, batch_states, batch_labels, FLAGS,
    read_raw_data, prepare_batch, error_rate) -- confirm they are
    defined in this module.
    """
    # Placeholders for one mini-batch of board states and move labels.
    train_data_node = tf.placeholder(
        tf.float32, shape=(BATCH_SIZE, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH))
    train_labels_node = tf.placeholder(tf.int32, shape=(BATCH_SIZE,))

    # Input layer uses VALID padding; the rest use Layer's default.
    input_layer = Layer("input_layer", paddingMethod="VALID")
    # NOTE(review): (80) is the int 80, not a 1-tuple; Layer presumably
    # accepts an int bias shape -- confirm against layer.py.
    out = input_layer.convolve(train_data_node, (3, 3, 3, 80), (80))

    # Seven identical 3x3, 80-filter convolution layers (conv1..conv7).
    for i in range(1, 8):
        conv = Layer("conv%d_layer" % i)
        out = conv.convolve(out, weight_shape=(3, 3, 80, 80), bias_shape=(80))

    # Final layer collapses to a single filter per board cell -> logits.
    out_layer = Layer("conv8_layer")
    logits = out_layer.one_filter_out(out, BOARD_SIZE)
    print("logits", logits)
    # Keyword arguments: TF >= 1.0 swapped the positional order of this
    # function, so passing by name is the only portable call.
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=train_labels_node))

    train_prediction = tf.nn.softmax(logits)

    # `batch` counts optimizer steps; decay the LR by 5% every
    # `train_step` examples seen.
    batch = tf.Variable(0)
    learning_rate = tf.train.exponential_decay(
        0.01, batch * BATCH_SIZE, train_step, 0.95, staircase=True)

    opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
    saver = tf.train.Saver()

    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        print("Initialized!")
        if not tf.app.flags.FLAGS.training:
            ckpt = tf.train.get_checkpoint_state(FLAGS.check_point_dir)
            # BUG FIX: CheckpointState has no `model_checkpoint_dir_path`
            # attribute; the correct field is `model_checkpoint_path`.
            if ckpt and ckpt.model_checkpoint_path:
                print("restoring a model")
                saver.restore(sess, ckpt.model_checkpoint_path)

        read_raw_data("data/train_games.dat")
        offset1, offset2 = 0, 0
        step = 1
        # NOTE(review): nEpoch is presumably advanced inside
        # prepare_batch(); otherwise this loop never terminates -- confirm.
        while nEpoch < num_epochs:
            off1, off2 = prepare_batch(offset1, offset2)
            x = batch_states.astype(np.float32)
            y = batch_labels.astype(np.int32)
            feed_diction = {train_data_node: x,
                            train_labels_node: y}
            _, loss_v, predictions = sess.run(
                [opt, loss, train_prediction], feed_dict=feed_diction)
            print("epoch:", nEpoch, "loss: ", loss_v,
                  "error rate:", error_rate(predictions, batch_labels))
            offset1, offset2 = off1, off2
            step = step + 1

        # BUG FIX: the tf module has no `save` function; checkpoints are
        # written through the Saver instance created above.
        saver.save(sess, FLAGS.check_point_dir + "/model.ckpt")
开发者ID:chenyangh,项目名称:pnnhex,代码行数:70,代码来源:Test.py

示例2: Layer

# 需要导入模块: from layer import Layer [as 别名]
# 或者: from layer.Layer import one_filter_out [as 别名]
# NOTE(review): fragment of a larger script -- conv3, out_conv2,
# input_labels, nEpoch, read_raw_data and prepare_batch are defined
# on lines outside this excerpt; the while-loop body is truncated.
# Stack of identical 3x3, 80-filter convolution layers.
out_conv3 = conv3.convolve(out_conv2, weight_shape=(3, 3, 80, 80), bias_shape=(80))

conv4 = Layer("conv4_layer")
out_conv4 = conv4.convolve(out_conv3, weight_shape=(3, 3, 80, 80), bias_shape=(80))

conv5 = Layer("conv5_layer")
out_conv5 = conv5.convolve(out_conv4, weight_shape=(3, 3, 80, 80), bias_shape=(80))

conv6 = Layer("conv6_layer")
out_conv6 = conv6.convolve(out_conv5, weight_shape=(3, 3, 80, 80), bias_shape=(80))

conv7 = Layer("conv7_layer")
out_conv7 = conv7.convolve(out_conv6, weight_shape=(3, 3, 80, 80), bias_shape=(80))

# Final layer collapses to one filter per board cell -> move logits.
conv8 = Layer("conv8_layer")
logits = conv8.one_filter_out(out_conv7, BOARD_SIZE)
print("logits", logits)
# Dense (one-hot) cross-entropy here, unlike the sparse variant used in
# the training script; input_labels comes from outside this excerpt.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, input_labels))

# Fixed learning rate (no decay schedule in this example).
learning_rate = 0.01

opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print("Initialized!")
    print("loss looks ", loss)
    read_raw_data("data/train_games.dat")
    offset1, offset2 = 0, 0
    # NOTE(review): nEpoch is presumably updated by prepare_batch();
    # the rest of the loop body is not shown in this excerpt.
    while(nEpoch < 1):
        off1, off2 = prepare_batch(offset1, offset2)
开发者ID:chenyangh,项目名称:pnnhex,代码行数:33,代码来源:supervised.py


注:本文中的layer.Layer.one_filter_out方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。