

Python tensorflow.arg_max Function Code Examples

This article collects typical usage examples of the Python function tensorflow.arg_max. If you are wondering what arg_max does, how to call it, or what real code that uses it looks like, the curated examples below should help.


The following presents 15 code examples of the arg_max function, sorted by popularity by default.
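Before the examples, here is a minimal sketch of the function itself (assuming a TensorFlow 1.x environment, where tf.arg_max is still available as a deprecated alias of tf.argmax): it returns the index of the largest value along the given dimension, which is how the examples below compute a "predicted class".

import tensorflow as tf

# Two samples with three class scores each.
logits = tf.constant([[0.1, 2.5, 0.3],
                      [1.2, 0.4, 0.9]])

# Index of the maximum along dimension 1 (per row).
pred = tf.arg_max(logits, 1)

with tf.Session() as sess:
    print(sess.run(pred))  # [1 0]

In current TensorFlow releases tf.argmax is the preferred spelling; the examples below use the older alias.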

Example 1: cnn_handigit

def cnn_handigit():
    sess = tf.InteractiveSession()

    # parameters
    W_conv1 = weight_varible([5, 5, 1, 32])
    b_conv1 = bias_variable([32])

    # conv layer-1
    x = tf.placeholder(tf.float32, [None, 784])
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # conv layer-2
    W_conv2 = weight_varible([5, 5, 32, 64])
    b_conv2 = bias_variable([64])

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # full connection
    W_fc1 = weight_varible([7 * 7 * 64, 1024])
    b_fc1 = bias_variable([1024])

    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # dropout
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # output layer: softmax
    W_fc2 = weight_varible([1024, 10])
    b_fc2 = bias_variable([10])

    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)
    y_ = tf.placeholder(tf.float32, [None, 10])

    # model training
    cross_entropy = -tf.reduce_sum(y_ * tf.log(y_conv))
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.arg_max(y_conv, 1), tf.arg_max(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    sess.run(tf.initialize_all_variables())

    saver = tf.train.Saver()
    tf.add_to_collection('train_op', train_step)
    for i in range(200):
        batch = mnist.train.next_batch(50)
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
            print("step %d, training accuracy %g" % (i, train_accuracy))
            saver.save(sess, 'train_process', global_step=i)  # save a checkpoint at this step
        train_step.run(feed_dict = {x: batch[0], y_: batch[1], keep_prob: 0.5})

    # accuracy on test
    print("test accuracy %g"%(accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels, keep_prob: 1.0})))
Developer ID: tuling56, Project: Python, Lines of code: 60, Source: cnn_handigit.py

Example 2: evaluate

def evaluate(input_x, input_y):
    '''
    Evaluate the text-classification model.
    :return
        result: predicted class indices (which dimension is largest)
        accuracy: accuracy over the input batch
    '''
    graph = tf.Graph()
    with graph.as_default(), tf.Session() as sess:
        # 恢复模型
        features = tf.placeholder(tf.int32, [None, cnnc.SEQUENCE_LENGTH])
        labels = tf.placeholder(tf.int32, [None, cnnc.FLAGS.num_class])
        logits = cnnc.inference(features)
        predictions = tf.arg_max(logits, 1)
        correct_predictions = tf.equal(predictions, tf.arg_max(labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_predictions,
                                          dtype=tf.float32))
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            print("SUCESS")
        else:
            print("No checkpoint file found")

        result, accuracy = sess.run([predictions, accuracy], feed_dict={features: input_x, labels: input_y})

    return result, accuracy
Developer ID: IgorWang, Project: MachineLearningPracticer, Lines of code: 28, Source: cnnc_eval.py

Example 3: compute_accuracy

def compute_accuracy(y_hat, labels, sparse=False):
    """Compute accuracy for a 3-dimensional outputs.

    The prediction is assumed to be made by argmax.

    Parameters
    ----------
    y_hat : tensor, shape (batch_size, n_samples, n_outputs)
        Raw predictions of a neural network. It is not required to convert them
        to softmax probabilities, because softmax is a monotonic transform.
    labels : tensor
        True labels. It can have shape (batch_size, n_samples), in which case
        each value should be an index within [0, n_classes). Alternatively
        it can have shape (batch_size, n_samples, n_outputs), then for each
        sample a probability distribution with n_outputs values should be
        provided (this case also handles one-hot label encoding). In the
        latter case the correct label is also selected by argmax. Set `sparse`
        parameter to select an appropriate setting.
    sparse : bool, default False
        Whether `labels` are indices or full distributions.

    Returns
    -------
    accuracy : scalar tensor
        Computed accuracy.
    """
    prediction = tf.arg_max(y_hat, 2)
    if sparse:
        labels = tf.cast(labels, prediction.dtype)
    else:
        labels = tf.arg_max(labels, 2)

    return tf.reduce_mean(tf.cast(tf.equal(prediction, labels), tf.float32))
Developer ID: developeralgo8888, Project: ufcnn, Lines of code: 33, Source: ufcnn.py

Example 4: main

def main(_):
    start_time = time.time()

    data_sets = read_data_sets()

    with tf.Graph().as_default(), tf.Session() as session:
        dictionary_size = len(data_sets.dictionary)

        x = tf.placeholder(tf.float32, [None, dictionary_size])
        W = tf.Variable(tf.zeros([dictionary_size, label_size]))
        b = tf.Variable(tf.zeros([label_size]))
        y = tf.nn.softmax(tf.matmul(x, W) + b)

        y_ = tf.placeholder(tf.float32, [None, label_size])
        cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy)

        tf.initialize_all_variables().run()

        for i in range(1000):
            batch_xs, batch_ys = data_sets.train.next_batch(100)
            train_step.run({x: batch_xs, y_: batch_ys})

        correct_prediction = tf.equal(tf.arg_max(y, 1), tf.arg_max(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        print(accuracy.eval({x: data_sets.validation.inputs, y_: data_sets.validation.labels}))

    print("Elapsed time:", time.time() - start_time)
Developer ID: izeye, Project: samples-tensorflow, Lines of code: 28, Source: unknown_softmax.py

Example 5: train_neural_network

def train_neural_network(x):
    prediction = neural_network_model(x)

    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdadeltaOptimizer().minimize(cost)

    hm_epoches = 10

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        for epoch in range(hm_epoches):
            epoch_loss = 0

            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)

                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c

            print('Epoch ', epoch, ' completed out of ', hm_epoches, ' loss: ', epoch_loss)

        correct = tf.equal(tf.arg_max(prediction, 1), tf.arg_max(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))

        print('Accuracy :', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
Developer ID: Daniel194, Project: Machine-Learning, Lines of code: 26, Source: DeepLearning.py

Example 6: num_correct_prediction

def num_correct_prediction(logits, labels):
  """Evaluate the quality of the logits at predicting the label.
  Return:
      the number of correct predictions
  """
  correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
  correct = tf.cast(correct, tf.int32)
  n_correct = tf.reduce_sum(correct)
  return n_correct
Developer ID: caibobit, Project: tensorflowlearning, Lines of code: 9, Source: tools.py

Example 7: build_graph

    def build_graph(self):

        x = tf.placeholder(tf.float32, [None, self.window_size, self.dim_word_feat], "x_input")
        y = tf.placeholder(tf.float32, [None, self.output_size], "label_input")

        W1 = self.weight_variable(shape=[2, self.dim_word_feat, 1, self.num_feat_map])
        b1 = self.bias_variable(shape=[self.num_feat_map])

        x_inputs = tf.reshape(x, [-1, self.window_size, self.dim_word_feat, 1])

        # h_conv_1 size: [-1, dwf, ws, nfm]
        h_conv_1 = tf.nn.relu(self.conv_2d(x_inputs, W1) + b1)
        print h_conv_1.get_shape()
        # h_max_pool size: [-1, 1,1, nfm]
        h_max_pool = self.max_pool(h_conv_1)
        print h_max_pool.get_shape()

        # flatten the pooled feature maps into a single sentence vector
        # sent_vec size: [-1, nfm]
        sent_vec = tf.reshape(h_max_pool, [-1, self.num_feat_map])
        print sent_vec.get_shape()

        W2 = self.weight_variable(shape=[self.num_feat_map, self.output_size])
        b2 = self.bias_variable(shape=[self.output_size])

        logits = tf.matmul(sent_vec, W2) + b2
        print logits.get_shape()

        outputs = tf.nn.softmax(logits)
        print outputs.get_shape()

        # window - level
        cross_entropy = tf.reduce_mean(-tf.reduce_sum(tf.mul(y, tf.log(outputs)), reduction_indices=[1]))
        print cross_entropy.get_shape()
        # # sentence - level
        # y_label = tf.arg_max(y, 1)
        # ltm = self.label_transition_mat([self.output_size + 1, self.output_size])
        #
        # score_golden =  tf.reduce_sum(ltm[])
        # log_add_score

        train_step = tf.train.AdamOptimizer(self.learning_rate).minimize(cross_entropy)

        prediction = tf.arg_max(outputs, 1)
        ori_label = tf.arg_max(y, 1)

        accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, ori_label), tf.float32))

        return dict(
            x=x,
            y=y,
            loss=cross_entropy,
            train=train_step,
            accuracy=accuracy,
            prediction=prediction,
            ori_label=ori_label
        )
Developer ID: staylonging, Project: tf, Lines of code: 57, Source: mycnn.py

Example 8: accuracy

def accuracy(logits, labels):
  """Evaluate the quality of the logits at predicting the label.
  Args:
    logits: Logits tensor, float - [batch_size, NUM_CLASSES].
    labels: Labels tensor, 
  """
  with tf.name_scope('accuracy') as scope:
      correct = tf.equal(tf.arg_max(logits, 1), tf.arg_max(labels, 1))
      correct = tf.cast(correct, tf.float32)
      accuracy = tf.reduce_mean(correct)*100.0
      tf.summary.scalar(scope+'/accuracy', accuracy)
  return accuracy
Developer ID: caibobit, Project: tensorflowlearning, Lines of code: 12, Source: tools.py

Example 9: calc_reward

def calc_reward(outputs):
  outputs = outputs[-1]  # look at ONLY THE END of the sequence
  outputs = tf.reshape(outputs, (batch_size, cell_out_size))
  h_a_out = weight_variable((cell_out_size, n_classes))

  p_y = tf.nn.softmax(tf.matmul(outputs, h_a_out))
  max_p_y = tf.arg_max(p_y, 1)
  correct_y = tf.cast(labels_placeholder, tf.int64)

  R = tf.cast(tf.equal(max_p_y, correct_y), tf.float32)  # reward per example

  reward = tf.reduce_mean(R)  # overall reward

  p_loc = gaussian_pdf(mean_locs, sampled_locs)
  p_loc = tf.reshape(p_loc, (batch_size, glimpses * 2))

  R = tf.reshape(R, (batch_size, 1))
  J = tf.concat(1, [tf.log(p_y + 1e-5) * onehot_labels_placeholder, tf.log(
      p_loc + 1e-5) * R])
  J = tf.reduce_sum(J, 1)
  J = tf.reduce_mean(J, 0)
  cost = -J

  optimizer = tf.train.AdamOptimizer(lr)
  train_op = optimizer.minimize(cost)

  return cost, reward, max_p_y, correct_y, train_op
Developer ID: ffmpbgrnn, Project: tensorflow_mnist_ram, Lines of code: 27, Source: ram.py

Example 10: test_i2v

def test_i2v():
    """Loads the i2v network and applies it to a test image.
    """
    with tf.Session() as sess:
        net = get_i2v_model()
        tf.import_graph_def(net['graph_def'], name='i2v')
        g = tf.get_default_graph()
        names = [op.name for op in g.get_operations()]
        x = g.get_tensor_by_name(names[0] + ':0')
        softmax = g.get_tensor_by_name(names[-3] + ':0')

        from skimage import data
        img = preprocess(data.coffee())[np.newaxis]
        res = np.squeeze(softmax.eval(feed_dict={x: img}))
        print([(res[idx], net['labels'][idx])
               for idx in res.argsort()[-5:][::-1]])

        """Let's visualize the network's gradient activation
        when backpropagated to the original input image.  This
        is effectively telling us which pixels contribute to the
        predicted class or given neuron"""
        pools = [name for name in names if 'pool' in name.split('/')[-1]]
        fig, axs = plt.subplots(1, len(pools))
        for pool_i, poolname in enumerate(pools):
            pool = g.get_tensor_by_name(poolname + ':0')
            pool.get_shape()
            neuron = tf.reduce_max(pool, 1)
            saliency = tf.gradients(neuron, x)
            neuron_idx = tf.arg_max(pool, 1)
            this_res = sess.run([saliency[0], neuron_idx],
                                feed_dict={x: img})

            grad = this_res[0][0] / np.max(np.abs(this_res[0]))
            axs[pool_i].imshow((grad * 128 + 128).astype(np.uint8))
            axs[pool_i].set_title(poolname)
Developer ID: Arn-O, Project: kadenze-deep-creative-apps, Lines of code: 35, Source: i2v.py

Example 11: train

def train(mnist):
    x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # moving-average op over the trainable variables
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    # loss function
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.arg_max(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE, global_step, mnist.train.num_examples/BATCH_SIZE, LEARNING_RATE_DECAY)
    # training step
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')
    
    # initialize the TF Saver (persistence class)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.initialize_all_variables().run()

        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
            if i % 1000 == 0:
                print("After %d training step(s), loss on training "
                    "batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
Developer ID: ZhangXinNan, Project: LearnPractice, Lines of code: 32, Source: mnist_train.py

Example 12: train_neural_network

def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = prediction,labels = y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    
    #cycles of feed forward and back propagation
    hm_epochs = 10
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        
        for epoch in range(hm_epochs):
            epoch_loss = 0
            
            i = 0
            while i < len(train_x):
                start = i
                end = i+batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                
                _,c = sess.run([optimizer,cost],feed_dict = {x: batch_x,y: batch_y})
                epoch_loss += c
                i += batch_size
                
            print('Epoch',epoch+1,'completed out of', hm_epochs,'loss:',epoch_loss)
        correct = tf.equal(tf.arg_max(prediction,1),tf.argmax(y,1))
        
        accuracy = tf.reduce_mean(tf.cast(correct,'float'))
        print('Accuracy: ',accuracy.eval({x:test_x,y:test_y}))
Developer ID: akuchotrani, Project: Tensorflow, Lines of code: 29, Source: SentimentNeuralNetwork.py

Example 13: infer

def infer(args):
    """
    """
    dataloader = DataLoader(args.input_dict)

    args.seq_length = dataloader.seq_length
    args.char_size = len(dataloader.char_vocab_dict)
    args.phvocab_size = len(dataloader.ph_vocab_dict)

    model = Model(args)

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())

        ##
        ## initial state for the model
        ##
        state = sess.run(model.initial_state)

        dataloader.reset_batch_pointer()

        for n in xrange(dataloader.num_batches):
            b = dataloader.next_batch()
            x, y = b
            inx = np.array([sess.run(x), sess.run(x)])
           
            feed = {model.input_data: inx}
            logits = sess.run(model.logits, feed_dict = feed)
            logits = tf.split(0, args.batch_size, logits)
            
            for res in logits:
                result = sess.run(tf.arg_max(res, 1))
                print(result, [dataloader.ph_vocab_invdict[i] for i in result])
Developer ID: datavizweb, Project: g2p-tf, Lines of code: 33, Source: g2p.py

Example 14: MLP

def MLP(trainFeature, trainLabel, testFeature):
    N1 = trainFeature.shape[0]
    N2 = testFeature.shape[0]
    D = trainFeature.shape[1]
    x = tf.placeholder(tf.float32, [None, D])
    W = tf.Variable(tf.zeros([D, 2]))
    b = tf.Variable(tf.zeros([2]))
    y = tf.nn.softmax(tf.matmul(x, W) + b)
    y_ = tf.placeholder(tf.float32, [None, 2])
    cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
    init = tf.initialize_all_variables()
    label1 = np.zeros([N1, 2])
    for item in range(N1):
        label1[item][trainLabel[item]] = 1
    sess = tf.Session()
    sess.run(init)
    idx = [i for i in range(N1)]
    for i in range(100):
        randomSamples = random.sample(idx, 5)
        batch_xs = trainFeature[randomSamples, :]
        batch_ys = label1[randomSamples]
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
        if i % 10 == 0:
            print(i, sess.run(W), sess.run(b))

    #correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))

    #accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    predicted_label = tf.arg_max(y, 1)
    return(sess.run(predicted_label, feed_dict={x: testFeature}))
Developer ID: Codelegant92, Project: RiskEvaluation, Lines of code: 31, Source: NeuralNetwork.py

Example 15: __init__

    def __init__(self, layer_sizes, layer_types,
                 init_value_scale=1.0, uniform_init=False,
                 verbose = True):
        '''
        initialize network architecture
        :param layer_sizes: list type, layer sizes,
                e.g. a 3-layer network "784:256:10"
        :param layer_types: list type, hidden layer types,
                e.g. sigmoid/tanh or "sigmoid:tanh" for 2-hidden-layer network
        :param init_value_scale: int, scale for uniform initialization
        :param uniform_init: bool, true for uniform, gaussian otherwise
        :param verbose: bool, verbose
        :return:
        '''

        self.verbose = verbose
        # input settings
        self.x = tf.placeholder(tf.float32, [None, layer_sizes[0]], name='input')
        self.y = tf.placeholder(tf.float32, [None, layer_sizes[-1]], name='truth')
        self.learning_rate = tf.placeholder(tf.float32, name='learningrate')
        self.momentum = tf.placeholder(tf.float32, name='momentum')
        # layers
        self.layers = []
        # build multi-layer perceptron architecture
        if self.verbose: print('Building Multilayer Perceptron...')
        # forward pass and build output
        for idx in xrange(len(layer_sizes) - 1):
            n_input = layer_sizes[idx]
            n_output = layer_sizes[idx + 1]
            layer = Layer(n_input, n_output, layer_types[idx], init_value_scale, uniform_init)
            self.layers.append(layer)

        # forward
        net_output = self.x
        for idx in xrange(len(self.layers)):
            net_output = self.layers[idx].output(net_output)
        # cost function with ground truth provided, for training
        self.cost = self.layers[-1].neg_loglikelihood(net_output, self.y)
        # make prediction
        self.prediction = tf.arg_max(net_output, dimension=1)
        # prediction error
        self.prederr = tf.reduce_mean(tf.to_float(tf.not_equal(self.prediction, tf.arg_max(self.y, dimension=1))))
        # training
        self.train_process = tf.train.MomentumOptimizer(self.learning_rate, self.momentum).minimize(self.cost)
        # session
        self.sess = tf.Session()
Developer ID: zhaocq-nlp, Project: NeuralExamples, Lines of code: 46, Source: MLP.py


Note: The tensorflow.arg_max examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.