

Python tensorflow.argmin Function Code Examples

This article compiles typical usage examples of the Python function tensorflow.argmin. If you have been wondering what argmin actually does, how to call it, or what real-world usage looks like, the curated code examples below should help.


The following presents 15 code examples of the argmin function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
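
Before turning to the examples, here is a minimal sketch of what tf.argmin does, written against the TensorFlow 1.x API used throughout this page (newer releases only accept the axis keyword; several examples below use the older dimension alias):

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 1, 0]])
row_mins = tf.argmin(x, axis=1)  # index of the smallest entry in each row -> [0, 2]
col_mins = tf.argmin(x, axis=0)  # index of the smallest entry in each column -> [0, 1, 1]

with tf.Session() as sess:
    print(sess.run([row_mins, col_mins]))  # [array([0, 2]), array([0, 1, 1])]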

Example 1: init_data

def init_data(inputFile, K):
    global training_data, validation_data, centroids, training_num, data_dim, centroids_num 
    global tf_data_set, tf_centroids
    # initialize data and centroids
    data = np.float32( np.load(inputFile))
    data = (data - data.mean()) / data.std()
    # update data_num and centroids_num
    data_num, data_dim = data.shape
    centroids_num = K
    # training data and validation data
    training_num = int(2./3 * data_num)
    training_data = data[:training_num]
    validation_data = data[training_num:]
    centroids = tf.truncated_normal(shape=[centroids_num, data_dim])
    # update tf_data_set and tf_centroids
    tf_data_set = tf.placeholder(tf.float32, shape=[None, data_dim])
    tf_centroids = tf.Variable(tf.convert_to_tensor(centroids, dtype=tf.float32))
    ########### for the training cases #####################
    # get the euclidean distance
    tf_train_dist = euclidean_dist(tf_data_set, tf_centroids, training_num, centroids_num)
    # get the min index for data set
    tf_train_min_index = tf.argmin(tf_train_dist, dimension=1)
    # loss and optimizer
    tf_train_loss = tf.reduce_sum(tf.reduce_min(euclidean_dist(tf_data_set, tf_centroids, training_num, centroids_num), 
        1, keep_dims=True))
    tf_train_opt = tf.train.AdamOptimizer(learning_rate=0.1, beta1=0.9, beta2=0.99, epsilon=1e-5).minimize(tf_train_loss)
    ########### for the validation cases ####################
    tf_valid_dist = euclidean_dist(tf_data_set, tf_centroids, (data_num-training_num), centroids_num)
    tf_valid_min_index = tf.argmin(tf_valid_dist, dimension=1)
    tf_valid_loss = tf.reduce_sum(tf.reduce_min(euclidean_dist(tf_data_set, tf_centroids, (data_num-training_num), centroids_num), 
        1, keep_dims=True))
    return tf_train_min_index, tf_train_loss, tf_train_opt, tf_valid_loss
Author: z23han, Project: ECE521-Inference-Algorithm-and-Machine-Learning, Lines: 32, Source: a3_q1_2_4.py

Example 2: testArgMinMax

 def testArgMinMax(self):
   with self.cached_session():
     self.assertAllEqual(
         tf.argmin([[1, 2, 3], [4, 1, 0]], dimension=1).eval(),
         [0, 2])
     self.assertAllEqual(
         tf.argmin([[1, 2, 3], [4, 1, 0]], dimension=0).eval(),
         [0, 1, 1])
     self.assertAllEqual(
         tf.argmax([[1, 2, 3], [4, 1, 0]], dimension=1).eval(),
         [2, 0])
     self.assertAllEqual(
         tf.argmax([[1, 2, 3], [4, 1, 0]], dimension=0).eval(),
         [1, 0, 0])
Author: JonathanRaiman, Project: tensorflow, Lines: 14, Source: test_file_v0_11.py

Example 3: recall

 def recall(self, y_):
     y_true = tf.cast(tf.argmin(y_, 1), tf.bool)
     y_pred = tf.cast(tf.argmin(self.y, 1), tf.bool)
     # 1 stands for positive, 0 stands for negative
     tp = tf.reduce_sum(tf.cast(tf.logical_and(y_true, y_pred), tf.float32))
     tn = tf.reduce_sum(tf.cast(tf.logical_not(tf.logical_or(y_true, y_pred)), tf.float32))
     p = tf.reduce_sum(tf.cast(y_true, tf.float32))
     n = tf.reduce_sum(tf.cast(tf.logical_not(y_true), tf.float32))
      fn = p - tp  # actual positives the prediction missed
      fp = n - tn  # actual negatives predicted as positive
     # t = tf.add(tp, tn)
     # f = tf.add(fp, fn)
     relevant = tf.add(tp, fn)
     recall = tf.div(tp, relevant)
     return recall
Author: thuzhf, Project: Advanced-Machine-Learning, Lines: 15, Source: model.py

Example 4: precision

 def precision(self, y_):
     y_true = tf.cast(tf.argmin(y_, 1), tf.bool)
     y_pred = tf.cast(tf.argmin(self.y, 1), tf.bool)
     # 1 stands for positive, 0 stands for negative
     tp = tf.reduce_sum(tf.cast(tf.logical_and(y_true, y_pred), tf.float32))
     # tn = tf.reduce_sum(tf.cast(tf.logical_not(tf.logical_or(y_true, y_pred)), tf.float32))
     p = tf.reduce_sum(tf.cast(y_true, tf.float32))
     # n = tf.reduce_sum(tf.cast(tf.logical_not(y_true), tf.float32))
     # fp = p - tp
     # fn = n - tn
     # t = tf.add(tp, tn)
     # f = tf.add(fp, fn)
     # relevant = tf.add(tp, fn)
      predicted = tf.reduce_sum(tf.cast(y_pred, tf.float32))  # all predicted positives (tp + fp)
      precision = tf.div(tp, predicted)
     return precision
Author: thuzhf, Project: Advanced-Machine-Learning, Lines: 15, Source: model.py

Example 5: assign_to_nearest

 def assign_to_nearest(self, samples, centroids):
     expanded_vectors = tf.expand_dims(samples, 0)
     expanded_centroids = tf.expand_dims(centroids, 1)
     distances = tf.reduce_sum(tf.square(tf.sub(expanded_vectors, expanded_centroids)), 2)
     mins = tf.argmin(distances, 0)
     nearest_indices = mins
     return nearest_indices
Author: KellyChan, Project: python-examples, Lines: 7, Source: clustering.py
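
The expand_dims pattern in Example 5 recurs in Examples 7 and 12, so it is worth spelling out: samples of shape [n, d] become [1, n, d], centroids of shape [k, d] become [k, 1, d], the subtraction broadcasts to [k, n, d], and summing squares over the last axis leaves a [k, n] distance matrix, so tf.argmin over axis 0 picks the nearest centroid for each sample. A self-contained sketch with made-up data (tf.subtract replaces the pre-1.0 tf.sub used above):

import tensorflow as tf

samples = tf.constant([[0.0, 0.0], [10.0, 10.0], [0.5, 0.0]])  # [n=3, d=2]
centroids = tf.constant([[0.0, 0.0], [10.0, 10.0]])            # [k=2, d=2]
diff = tf.subtract(tf.expand_dims(samples, 0), tf.expand_dims(centroids, 1))  # [k, n, d]
distances = tf.reduce_sum(tf.square(diff), 2)                  # [k, n]
nearest = tf.argmin(distances, 0)                              # [n]

with tf.Session() as sess:
    print(sess.run(nearest))  # [0 1 0]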

Example 6: argmin

def argmin(x, axis=-1):
    '''Returns the index of the minimum value
    along a tensor axis.
    '''
    if axis < 0:
        axis = axis % len(x.get_shape())
    return tf.argmin(x, axis)
Author: NajNaj, Project: keras, Lines: 7, Source: tensorflow_backend.py

Example 7: _build_graph

    def _build_graph(self):
        """Construct tensorflow nodes for round of clustering"""
        # N.B. without tf.Variable, makes awesome glitchy clustered images
        self.centroids_in = tf.Variable(tf.slice(tf.random_shuffle(self.arr),
                                     [0, 0], [self.k, -1]), name="centroids_in")
        # tiled should be shape(self.n_pixels, self.k, size_data = 2 + self.channels)
        tiled_pix = tf.tile(tf.expand_dims(self.arr, 1),
                            multiples=[1, self.k, 1], name="tiled_pix")

        # no need to take the square root: sqrt is monotonic, so it leaves the argmin unchanged
        def radical_euclidean_dist(x, y):
            """Takes in 2 tensors and returns euclidean distance radical, i.e. dist**2"""
            with tf.name_scope("radical_euclidean"):
                return tf.square(tf.sub(x, y))

        # should be shape(self.n_pixels, self.k)
        distances = tf.reduce_sum(radical_euclidean_dist(tiled_pix, self.centroids_in),
                                  reduction_indices=2, name="distances")
        # should be shape(self.n_pixels)
        nearest = tf.to_int32(tf.argmin(distances, 1), name="nearest")

        # should be list of len self.k with tensors of shape(size_cluster, size_data)
        self.clusters = tf.dynamic_partition(self.arr, nearest, self.k)
        # should be shape(self.k, size_data)
        self.centroids = tf.pack([tf.reduce_mean(cluster, 0) for cluster in self.clusters],
            name="centroids_out")
        self.update_roids = tf.assign(self.centroids_in, self.centroids)
Author: meereeum, Project: k-meanz, Lines: 27, Source: k_means_tf.py

Example 8: model_train

def model_train(k):
    data = np.float32(np.load('data100D.npy'))
    sample_num = data.shape[0]
    dim = data.shape[1]
    cluster = k

    tf_data = tf.placeholder(tf.float32, shape=(sample_num, dim))
    tf_centroids = tf.Variable(tf.truncated_normal([k, dim], mean=0.0, stddev=1.0))
    tf_min_index = tf.argmin(eucl_distance(tf_data, tf_centroids), dimension = 1)
    tf_loss = tf.reduce_sum(tf.reduce_min(eucl_distance(tf_data, tf_centroids),1,keep_dims=True))
    optimizer = tf.train.AdamOptimizer(0.01,0.9,0.99,1e-5).minimize(tf_loss)

    sess = tf.InteractiveSession()

    init = tf.initialize_all_variables()
    init.run()

    epoch = 1000
    loss_list = []
    for i in range(epoch):
        feed_dict = {tf_data: data}
        _, loss, assignments, centroids = sess.run([optimizer, tf_loss, tf_min_index, tf_centroids], feed_dict = feed_dict)
        loss_list.append(loss)
        if i % 50 == 0:
            print("Loss at step %d: %f" % (i, loss))

    cal_percentage(assignments, k)

    plt.title('the loss vs the number of updates 100-D')
    plt.xlabel('the number of updates')
    plt.ylabel('the value of the loss')
    plt.plot(range(len(loss_list)), loss_list)
    plt.show()
    return loss
Author: z23han, Project: ECE521-Inference-Algorithm-and-Machine-Learning, Lines: 34, Source: q_2_2_4_kmeans.py
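
A design note shared by Examples 1 and 8: rather than running Lloyd's alternating assignment/update steps, both treat the k-means objective (the sum over samples of the squared distance to the nearest centroid, via tf.reduce_min) as an ordinary loss and minimize it with Adam; tf.argmin is only used to recover the hard cluster assignments for reporting.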

Example 9: discretize_centroids

def discretize_centroids(x, levels, centroids, thermometer=False):
  """Discretize input into levels using custom centroids.

  Args:
    x: Input tensor to discretize, assumed to be between (0, 1).
    levels: Number of levels to discretize into.
    centroids: Custom centroids into which the input is to be discretized.
    thermometer: Whether to encode the discretized tensor in thermometer encoding
        (Default: False).

  Returns:
    Discretized version of x of shape [-1, height, width, channels * levels]
    using supplied centroids.
  """
  x_stacked = tf.stack(levels * [x], axis=-1)
  dist = tf.to_float(tf.squared_difference(x_stacked, centroids))
  idx = tf.argmin(dist, axis=-1)
  one_hot = tf.one_hot(idx, depth=levels, on_value=1., off_value=0.)

  # Check to see if we are encoding in thermometer
  discretized_x = one_hot
  if thermometer:
    discretized_x = one_hot_to_thermometer(one_hot, levels, flattened=False)

  # Reshape x to [-1, height, width, channels * levels]
  discretized_x = flatten_last(discretized_x)
  return discretized_x
Author: locussam, Project: obfuscated-gradients, Lines: 27, Source: discretization_utils.py
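
The docstring in Example 9 is easiest to follow with concrete shapes. A hypothetical call (the input values and centroids below are made up; flatten_last is the module's own helper):

x = tf.constant([[[[0.1], [0.4]], [[0.6], [0.9]]]])  # [1, 2, 2, 1] image in (0, 1)
centroids = tf.constant([0.0, 0.5, 1.0])             # levels = 3
out = discretize_centroids(x, levels=3, centroids=centroids)
# x_stacked: [1, 2, 2, 1, 3]; tf.argmin over the last axis maps
# 0.1 -> centroid 0, 0.4 and 0.6 -> centroid 1, 0.9 -> centroid 2;
# out is the flattened one-hot encoding of shape [1, 2, 2, 3]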

Example 10: __call__

  def __call__(self, codes):
    """Use codebook to find nearest neighbor for each code.

    Args:
      codes: A `float`-like `Tensor` containing the latent
        vectors to be compared to the codebook. These are rank-3 with shape
        `[batch_size, latent_size, code_size]`.

    Returns:
      nearest_codebook_entries: The 1-nearest neighbor in Euclidean distance for
        each code in the batch.
      one_hot_assignments: The one-hot vectors corresponding to the matched
        codebook entry for each code in the batch.
    """
    distances = tf.norm(
        tf.expand_dims(codes, 2) -
        tf.reshape(self.codebook, [1, 1, self.num_codes, self.code_size]),
        axis=3)
    assignments = tf.argmin(distances, 2)
    one_hot_assignments = tf.one_hot(assignments, depth=self.num_codes)
    nearest_codebook_entries = tf.reduce_sum(
        tf.expand_dims(one_hot_assignments, -1) *
        tf.reshape(self.codebook, [1, 1, self.num_codes, self.code_size]),
        axis=2)
    return nearest_codebook_entries, one_hot_assignments
Author: lewisKit, Project: probability, Lines: 25, Source: vq_vae.py
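
A note on the shapes in Example 10, with B = batch_size, L = latent_size, K = num_codes and D = code_size (annotations added here, not in the original source):

# codes:       [B, L, D]
# distances:   [B, L, K]  (Euclidean norm over the code_size axis)
# assignments: [B, L]     (tf.argmin over the K codebook entries)
# one_hot_assignments: [B, L, K]
# nearest_codebook_entries: [B, L, D]  (a differentiable gather: one-hot
#     weights times the codebook, summed over the K axis)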

Example 11: get_train

def get_train(train_ph_dict,var_dict,var_ph_dict,arg_dict):
    mid0 = tf.one_hot(train_ph_dict['choice_0'], 9, axis=-1, dtype=tf.float32)
    mid0 = mid0 * get_q(train_ph_dict['state_0'],var_dict)
    mid0 = tf.reduce_sum(mid0, reduction_indices=[1])

    mid1 = get_q(train_ph_dict['state_1'],var_ph_dict)
    mid1 = tf.reduce_max(mid1, reduction_indices=[1])  
    mid1 = mid1 * train_ph_dict['cont']
    mid1 = mid1 * tf.constant(arg_dict['train_beta'])

#     l2r = tf.constant(0.0)
#     cell_count = tf.constant(0.0)
#     for v in var_dict.values():
#         l2r = l2r + get_l2(v)
#         cell_count = cell_count + tf.to_float(tf.size(v))
#     l2r = l2r / cell_count
#     l2r = l2r / tf.constant(ELEMENT_L2_FACTOR*ELEMENT_L2_FACTOR)
#     l2r = l2r * tf.constant(L2_WEIGHT)
    
    mid = mid0+mid1-train_ph_dict['reward_1']
#    mid = mid * mid
    mid = tf.abs(mid)
    min_loss_idx = tf.argmin(mid, dimension=0)
    mid = tf.reduce_mean(mid)
    score_diff = mid
#     mid = mid + l2r
#     mid = mid + ( tf.abs( tf.reduce_mean(var_dict['b5']) ) * tf.constant(L2_WEIGHT) )

    loss = mid

    mid = tf.train.AdamOptimizer().minimize(mid,var_list=var_dict.values())
    train = mid
    
    return train, loss, score_diff, min_loss_idx
Author: luzi82, Project: codelog.tensorflow.tictactoe, Lines: 34, Source: deeplearn0013.py

Example 12: assign_to_cluster

def assign_to_cluster(X, centroids):  

    expanded_vectors = tf.expand_dims(X, 0)
    expanded_centroids = tf.expand_dims(centroids, 1)
    distances = tf.reduce_sum(tf.square(tf.subtract(expanded_vectors, expanded_centroids)), 2)
    mins = tf.argmin(distances, 0)

    return mins
Author: timestocome, Project: DeepLearning, Lines: 8, Source: KMeans_tf.py

Example 13: cal_loss

 def cal_loss(self, X, Y, D):
     ED = ed.Euclid_Distance(X, Y, D)
     dist = ED.cal_Euclid_dis()
     cluster = tf.argmin(dist, 1)
     correspond_cluster = tf.gather(Y,cluster)
     offset = tf.sub(X, correspond_cluster)
     loss = tf.reduce_sum(tf.square(offset))
     return loss, cluster
Author: haoyues, Project: ML_HWs, Lines: 8, Source: k_mean.py

Example 14: model

  def model(data, train=False):
    """The Model definition."""
    # 2D convolution, with 'SAME' padding (i.e. the output feature map has
    # the same size as the input). Note that {strides} is a 4D array whose
    # shape matches the data layout: [image index, y, x, depth].
    conv = tf.nn.conv2d(data,
                        conv1_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    # Bias and rectified linear non-linearity.
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
    # Max pooling. The kernel size spec {ksize} also follows the layout of
    # the data. Here we have a pooling window of 2, and a stride of 2.
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    conv = tf.nn.conv2d(pool,
                        conv2_weights,
                        strides=[1, 1, 1, 1],
                        padding='SAME')
    relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
    pool = tf.nn.max_pool(relu,
                          ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1],
                          padding='SAME')
    # Reshape the feature map cuboid into a 2D matrix to feed it to the
    # fully connected layers.
    pool_shape = pool.get_shape().as_list()
    reshape = tf.reshape(
        pool,
        [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])

    C_max = 4 * np.sqrt(6. / (20+512))
    C_init = tf.random_uniform(shape=[20,512],
                       minval=-C_max,maxval=C_max)
    C = tf.Variable(C_init)

    C2 = tf.expand_dims(C, 0)        # [1, 20, 512]
    X2 = tf.expand_dims(reshape, 1)  # [batch_size, 1, 512]
    
    dist = tf.reduce_sum(tf.square(tf.sub(X2,C2)),2)
    loss1 = tf.reduce_mean(tf.reduce_min(dist,1))
    choice = tf.argmin(dist,1)
    
    # Fully connected layer. Note that the '+' operation automatically
    # broadcasts the biases.
    hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
    # Add a 50% dropout during training only. Dropout also scales
    # activations such that no rescaling is needed at evaluation time.
    if train:
      hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)

    logits = tf.matmul(hidden, fc2_weights) + fc2_biases

    return loss1, logits 
Author: ktho22, Project: cdn, Lines: 57, Source: convolutional.py

Example 15: build_graph

    def build_graph(self, graph):
        self.xtr = tf.placeholder(dtype=tf.float32, shape=[None, 784])
        self.xte = tf.placeholder(dtype=tf.float32, shape=[784])    # one vector compares with all in self.xtr
        self.distance = tf.reduce_sum(tf.abs(tf.add(self.xtr, tf.negative(self.xte))), reduction_indices=1)
        self.pred = tf.argmin(self.distance, 0)

        self.global_step_t = tf.Variable(0, trainable=False, name='global_step_t')

        return graph
Author: jamescfli, Project: PythonTest, Lines: 9, Source: make_nearest_neighbour_model.py
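
To actually classify with Example 15's graph, one would feed the full training set plus a single test vector and read back pred as the index of the L1-nearest training sample. A hypothetical session sketch (train_x, train_labels, test_x and model are assumed to exist; model holds the graph built above):

nn_index = sess.run(model.pred, feed_dict={model.xtr: train_x,   # [n, 784]
                                           model.xte: test_x})   # [784]
predicted_label = train_labels[nn_index]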


Note: The tensorflow.argmin function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors. For distribution and use, please refer to the corresponding project's License. Do not repost without permission.