This page collects typical usage examples of the Python function tensorflow.reduce_min. If you have been wondering what exactly reduce_min does, how to call it, and where it is used in practice, the hand-picked code examples below should help.
The page shows 15 code examples of reduce_min, ordered by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
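Before the project examples, here is a minimal, self-contained sketch written for this page (TF 1.x API assumed, to match the examples below) of what tf.reduce_min computes: the minimum over all elements, or along a given axis.

import tensorflow as tf

x = tf.constant([[3.0, 1.0],
                 [2.0, 4.0]])
global_min = tf.reduce_min(x)           # minimum over all elements: 1.0
row_min = tf.reduce_min(x, axis=1)      # minimum along each row: [1.0, 2.0]
row_min_kept = tf.reduce_min(x, axis=1, keepdims=True)  # shape (2, 1); older 1.x releases spell this keep_dims

with tf.Session() as sess:
    print(sess.run([global_min, row_min, row_min_kept]))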
Example 1: init_data
def init_data(inputFile, K):
    global training_data, validation_data, centroids, training_num, data_dim, centroids_num
    global tf_data_set, tf_centroids
    # initialize data and centroids
    data = np.float32(np.load(inputFile))
    data = (data - data.mean()) / data.std()
    # update data_num and centroids_num
    data_num, data_dim = data.shape
    centroids_num = K
    # training data and validation data
    training_num = int(2. / 3 * data_num)
    training_data = data[:training_num]
    validation_data = data[training_num:]
    centroids = tf.truncated_normal(shape=[centroids_num, data_dim])
    # update tf_data_set and tf_centroids
    tf_data_set = tf.placeholder(tf.float32, shape=[None, data_dim])
    tf_centroids = tf.Variable(tf.convert_to_tensor(centroids, dtype=tf.float32))
    ########### for the training cases #####################
    # get the euclidean distance
    tf_train_dist = euclidean_dist(tf_data_set, tf_centroids, training_num, centroids_num)
    # get the min index for data set
    tf_train_min_index = tf.argmin(tf_train_dist, dimension=1)
    # loss and optimizer: sum of each point's distance to its nearest centroid
    tf_train_loss = tf.reduce_sum(tf.reduce_min(tf_train_dist, 1, keep_dims=True))
    tf_train_opt = tf.train.AdamOptimizer(learning_rate=0.1, beta1=0.9, beta2=0.99,
                                          epsilon=1e-5).minimize(tf_train_loss)
    ########### for the validation cases ####################
    tf_valid_dist = euclidean_dist(tf_data_set, tf_centroids, (data_num - training_num), centroids_num)
    tf_valid_min_index = tf.argmin(tf_valid_dist, dimension=1)
    tf_valid_loss = tf.reduce_sum(tf.reduce_min(tf_valid_dist, 1, keep_dims=True))
    return tf_train_min_index, tf_train_loss, tf_train_opt, tf_valid_loss
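The tf.reduce_min calls above implement the k-means assignment step: given a (points x centroids) distance matrix, the minimum along axis 1 is each point's distance to its nearest centroid, and tf.argmin gives that centroid's index. A standalone sketch of just this step, with made-up numbers not from the project above:

dist = tf.constant([[4.0, 1.0, 9.0],
                    [0.5, 2.0, 3.0]])              # 2 points x 3 centroids
nearest = tf.argmin(dist, axis=1)                  # [1, 0]
loss = tf.reduce_sum(tf.reduce_min(dist, axis=1))  # 1.0 + 0.5 = 1.5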
Example 2: gen_debug_td_error_summaries
def gen_debug_td_error_summaries(
        target_q_values, q_values, td_targets, td_errors):
    """Generates debug summaries for the critic given a set of batch samples.

    Args:
      target_q_values: set of predicted next stage values.
      q_values: current predicted value for the critic network.
      td_targets: discounted target_q_values with added next stage reward.
      td_errors: the difference between td_targets and q_values.
    """
    with tf.name_scope('td_errors'):
        tf.summary.histogram('td_targets', td_targets)
        tf.summary.histogram('q_values', q_values)
        tf.summary.histogram('target_q_values', target_q_values)
        tf.summary.histogram('td_errors', td_errors)

    with tf.name_scope('td_targets'):
        tf.summary.scalar('mean', tf.reduce_mean(td_targets))
        tf.summary.scalar('max', tf.reduce_max(td_targets))
        tf.summary.scalar('min', tf.reduce_min(td_targets))

    with tf.name_scope('q_values'):
        tf.summary.scalar('mean', tf.reduce_mean(q_values))
        tf.summary.scalar('max', tf.reduce_max(q_values))
        tf.summary.scalar('min', tf.reduce_min(q_values))

    with tf.name_scope('target_q_values'):
        tf.summary.scalar('mean', tf.reduce_mean(target_q_values))
        tf.summary.scalar('max', tf.reduce_max(target_q_values))
        tf.summary.scalar('min', tf.reduce_min(target_q_values))

    with tf.name_scope('td_errors'):
        tf.summary.scalar('mean', tf.reduce_mean(td_errors))
        tf.summary.scalar('max', tf.reduce_max(td_errors))
        tf.summary.scalar('min', tf.reduce_min(td_errors))
        tf.summary.scalar('mean_abs', tf.reduce_mean(tf.abs(td_errors)))
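The mean/max/min scalar-summary triple above recurs in several examples on this page (see also Examples 4, 5, 6 and 14). A small hypothetical helper, not part of the project above, that factors the pattern out:

def summarize_stats(name, tensor):
    """Emit mean/max/min scalar summaries for `tensor` under name scope `name`."""
    with tf.name_scope(name):
        tf.summary.scalar('mean', tf.reduce_mean(tensor))
        tf.summary.scalar('max', tf.reduce_max(tensor))
        tf.summary.scalar('min', tf.reduce_min(tensor))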
Example 3: coverage_box
def coverage_box(bboxes):
    y_min, x_min, y_max, x_max = tf.split(
        value=bboxes, num_or_size_splits=4, axis=1)
    y_min_coverage = tf.reduce_min(y_min, axis=0)
    x_min_coverage = tf.reduce_min(x_min, axis=0)
    y_max_coverage = tf.reduce_max(y_max, axis=0)
    x_max_coverage = tf.reduce_max(x_max, axis=0)
    return tf.stack(
        [y_min_coverage, x_min_coverage, y_max_coverage, x_max_coverage],
        axis=1)
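Here reduce_min over the min corners and reduce_max over the max corners yield the smallest box enclosing all input boxes. With two made-up boxes in [y_min, x_min, y_max, x_max] order:

boxes = tf.constant([[0.1, 0.2, 0.5, 0.6],
                     [0.3, 0.0, 0.9, 0.4]])
cover = coverage_box(boxes)  # [[0.1, 0.0, 0.9, 0.6]]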
Example 4: __init__
def __init__(self, reuse=False, trainable=True):
    # Placeholders for our input
    # Our input is 4 stacked grayscale frames of shape 84, 84 each
    self.states = tf.placeholder(shape=[None, 84, 84, 4], dtype=tf.uint8, name="X")
    # The TD target value
    self.targets = tf.placeholder(shape=[None], dtype=tf.float32, name="y")

    X = tf.to_float(self.states) / 255.0
    batch_size = tf.shape(self.states)[0]

    # Graph shared with Value Net
    with tf.variable_scope("shared", reuse=reuse):
        fc1 = build_shared_network(X, add_summaries=(not reuse))

    with tf.variable_scope("value_net"):
        self.logits = tf.contrib.layers.fully_connected(
            inputs=fc1,
            num_outputs=1,
            activation_fn=None)
        self.logits = tf.squeeze(self.logits, squeeze_dims=[1], name="logits")

        self.losses = tf.squared_difference(self.logits, self.targets)
        self.loss = tf.reduce_sum(self.losses, name="loss")

        self.predictions = {
            "logits": self.logits
        }

        # Summaries
        prefix = tf.get_variable_scope().name
        tf.scalar_summary(self.loss.name, self.loss)
        tf.scalar_summary("{}/max_value".format(prefix), tf.reduce_max(self.logits))
        tf.scalar_summary("{}/min_value".format(prefix), tf.reduce_min(self.logits))
        tf.scalar_summary("{}/mean_value".format(prefix), tf.reduce_mean(self.logits))
        tf.scalar_summary("{}/reward_max".format(prefix), tf.reduce_max(self.targets))
        tf.scalar_summary("{}/reward_min".format(prefix), tf.reduce_min(self.targets))
        tf.scalar_summary("{}/reward_mean".format(prefix), tf.reduce_mean(self.targets))
        tf.histogram_summary("{}/reward_targets".format(prefix), self.targets)
        tf.histogram_summary("{}/values".format(prefix), self.logits)

        if trainable:
            # self.optimizer = tf.train.AdamOptimizer(1e-4)
            self.optimizer = tf.train.RMSPropOptimizer(0.00025, 0.99, 0.0, 1e-6)
            self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
            self.grads_and_vars = [[grad, var] for grad, var in self.grads_and_vars if grad is not None]
            self.train_op = self.optimizer.apply_gradients(
                self.grads_and_vars,
                global_step=tf.contrib.framework.get_global_step())

    # Merge summaries from this scope (a dead duplicate filter in the
    # original source has been removed; behavior is unchanged)
    var_scope_name = tf.get_variable_scope().name
    summary_ops = tf.get_collection(tf.GraphKeys.SUMMARIES)
    summaries = [s for s in summary_ops if var_scope_name in s.name]
    self.summaries = tf.merge_summary(summaries)
Example 5: conv
def conv(self, input, k_h, k_w, c_o, s_h, s_w, name, relu=True, padding=DEFAULT_PADDING, group=1, trainable=True):
    print(name)
    if isinstance(input, tuple):
        input = input[0]
    self.validate_padding(padding)
    c_i = input.get_shape()[-1]
    print(c_i)
    print(input.get_shape().as_list())
    assert c_i % group == 0
    assert c_o % group == 0
    convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding)
    with tf.variable_scope(name) as scope:
        init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
        init_biases = tf.constant_initializer(0.0)
        kernel = self.make_var('weights', [k_h, k_w, c_i / group, c_o], init_weights, trainable)
        biases = self.make_var('biases', [c_o], init_biases, trainable)
        with tf.name_scope('summaries'):
            with tf.name_scope('weights'):
                mean = tf.reduce_mean(kernel)
                tf.summary.scalar('mean', mean)
                with tf.name_scope('stddev'):
                    stddev = tf.sqrt(tf.reduce_mean(tf.square(kernel - mean)))
                tf.summary.scalar('stddev', stddev)
                tf.summary.scalar('max', tf.reduce_max(kernel))
                tf.summary.scalar('min', tf.reduce_min(kernel))
                tf.summary.histogram('histogram', kernel)
            with tf.name_scope('biases'):
                mean = tf.reduce_mean(biases)
                tf.summary.scalar('mean', mean)
                with tf.name_scope('stddev'):
                    stddev = tf.sqrt(tf.reduce_mean(tf.square(biases - mean)))
                tf.summary.scalar('stddev', stddev)
                tf.summary.scalar('max', tf.reduce_max(biases))
                tf.summary.scalar('min', tf.reduce_min(biases))
                tf.summary.histogram('histogram', biases)
        if group == 1:
            conv = convolve(input, kernel)
        else:
            input_groups = tf.split(3, group, input)
            kernel_groups = tf.split(3, group, kernel)
            output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
            conv = tf.concat(3, output_groups)
        if relu:
            bias = tf.nn.bias_add(conv, biases)
            return tf.nn.relu(bias, name=scope.name)
        return tf.nn.bias_add(conv, biases, name=scope.name)
Example 6: fc
def fc(self, input, num_out, name, relu=True, trainable=True):
    print(name)
    with tf.variable_scope(name) as scope:
        # only use the first input
        if isinstance(input, tuple):
            input = input[0]
        input_shape = input.get_shape()
        if input_shape.ndims == 4:
            dim = 1
            for d in input_shape[1:].as_list():
                dim *= d
            feed_in = tf.reshape(tf.transpose(input, [0, 3, 1, 2]), [-1, dim])
        else:
            feed_in, dim = (input, int(input_shape[-1]))
        if name == 'bbox_pred':
            init_weights = tf.truncated_normal_initializer(0.0, stddev=0.001)
            init_biases = tf.constant_initializer(0.0)
        else:
            init_weights = tf.truncated_normal_initializer(0.0, stddev=0.01)
            init_biases = tf.constant_initializer(0.0)
        weights = self.make_var('weights', [dim, num_out], init_weights, trainable)
        biases = self.make_var('biases', [num_out], init_biases, trainable)
        with tf.name_scope('summaries'):
            with tf.name_scope('weights'):
                mean = tf.reduce_mean(weights)
                tf.summary.scalar('mean', mean)
                with tf.name_scope('stddev'):
                    stddev = tf.sqrt(tf.reduce_mean(tf.square(weights - mean)))
                tf.summary.scalar('stddev', stddev)
                tf.summary.scalar('max', tf.reduce_max(weights))
                tf.summary.scalar('min', tf.reduce_min(weights))
                tf.summary.histogram('histogram', weights)
            with tf.name_scope('biases'):
                mean = tf.reduce_mean(biases)
                tf.summary.scalar('mean', mean)
                with tf.name_scope('stddev'):
                    stddev = tf.sqrt(tf.reduce_mean(tf.square(biases - mean)))
                tf.summary.scalar('stddev', stddev)
                tf.summary.scalar('max', tf.reduce_max(biases))
                tf.summary.scalar('min', tf.reduce_min(biases))
                tf.summary.histogram('histogram', biases)
        op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
        fc = op(feed_in, weights, biases, name=scope.name)
        return fc
Example 7: print_act_stats
def print_act_stats(x, _str=""):
    if not do_print_act_stats:
        return x
    if hvd.rank() != 0:
        return x
    if len(x.get_shape()) == 1:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 2:
        x_mean, x_var = tf.nn.moments(x, [0], keep_dims=True)
    if len(x.get_shape()) == 4:
        x_mean, x_var = tf.nn.moments(x, [0, 1, 2], keep_dims=True)
    stats = [tf.reduce_min(x_mean), tf.reduce_mean(x_mean), tf.reduce_max(x_mean),
             tf.reduce_min(tf.sqrt(x_var)), tf.reduce_mean(tf.sqrt(x_var)), tf.reduce_max(tf.sqrt(x_var))]
    return tf.Print(x, stats, "[" + _str + "] " + x.name)
Example 8: compute_lookup_error
def compute_lookup_error(self, val):
    """Computes the lookup error."""
    cond = tf.equal(self.batch_print_answer, val)
    inter = tf.where(
        cond, self.init_print_error,
        tf.tile(
            tf.reshape(tf.constant(1e10, self.data_type), [1, 1, 1]), [
                self.batch_size, self.utility.FLAGS.max_word_cols +
                self.utility.FLAGS.max_number_cols,
                self.utility.FLAGS.max_elements
            ]))
    return tf.reduce_min(tf.reduce_min(inter, 1), 1) * tf.cast(
        tf.greater(
            tf.reduce_sum(tf.reduce_sum(tf.cast(cond, self.data_type), 1), 1),
            0.0), self.data_type)
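Note the nested reduction: tf.reduce_min(tf.reduce_min(inter, 1), 1) minimizes over axes 1 and 2 of a rank-3 tensor. The same values come from a single call with a list of axes:

min_error = tf.reduce_min(inter, axis=[1, 2])  # equivalent to the nested form above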
Example 9: conv2d_1
def conv2d_1(name, inputs, shape, strides=1):
    with tf.name_scope(name + "_conv"):
        W = tf.Variable(tf.random_normal(shape))
        tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(W))
        x1 = tf.nn.conv2d(inputs, W, strides=[1, strides, strides, 1], padding='SAME', name="conv1")
        if name == 'layerM21' and is_training == True:
            tf.summary.scalar('w_mean', tf.reduce_mean(W))
            tf.summary.scalar('w_max', tf.reduce_max(W))
            tf.summary.scalar('w_min', tf.reduce_min(W))
    with tf.name_scope(name + "_bias"):
        B = tf.Variable(tf.random_normal([shape[-1]]))
        tf.add_to_collection('l2_losses', tf.contrib.layers.l2_regularizer(lambda1)(B))
        x2 = tf.nn.bias_add(x1, B, name="bias1")
    with tf.name_scope(name + "_BN"):
        x3 = bn_layer(x2, is_training, name=name)
    with tf.name_scope(name + "_relu"):
        c1_out = leaky_relu(x3)
        # c1_out = tf.nn.leaky_relu(x3)
    return c1_out
Example 10: _psd_mask
def _psd_mask(x):
    """Computes whether each square matrix in the input is positive semi-definite.

    Args:
      x: A floating-point `Tensor` of shape `[B1, ..., Bn, M, M]`.

    Returns:
      mask: A floating-point `Tensor` of shape `[B1, ..., Bn]`. Each
        scalar is 1 if the corresponding matrix was PSD, otherwise 0.
    """
    # Allegedly
    # https://scicomp.stackexchange.com/questions/12979/testing-if-a-matrix-is-positive-semi-definite
    # it is more efficient to test for positive semi-definiteness by
    # trying to compute the Cholesky decomposition -- the matrix is PSD
    # if you succeed and not PSD if you fail. However, TensorFlow's
    # Cholesky raises an exception if _any_ of the input matrices are
    # not PSD, from which I don't know how to extract _which ones_, so I
    # proceed by explicitly computing all the eigenvalues and checking
    # whether they are all positive or not.
    #
    # Also, as was discussed in the answer, it is somewhat dangerous to
    # treat SPD-ness as binary in floating-point arithmetic. Cholesky
    # factorization can complete and 'look' like everything is fine
    # (e.g., O(1) entries and a diagonal of all ones) but the matrix can
    # have an exponential condition number.
    eigenvalues, _ = tf.self_adjoint_eig(x)
    return tf.cast(
        tf.reduce_min(eigenvalues, axis=-1) >= 0, dtype=x.dtype)
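A quick check of the function above with hypothetical inputs: the identity matrix is PSD, while [[0, 1], [1, 0]] has eigenvalues 1 and -1, so its smallest eigenvalue is negative.

batch = tf.stack([tf.eye(2),
                  tf.constant([[0.0, 1.0], [1.0, 0.0]])])
mask = _psd_mask(batch)  # -> [1.0, 0.0]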
Example 11: disjunction_of_literals
def disjunction_of_literals(literals, label="no_label"):
    list_of_literal_tensors = [lit.tensor for lit in literals]
    literals_tensor = tf.concat(1, list_of_literal_tensors)
    if default_tnorm == "product":
        result = 1.0 - tf.reduce_prod(1.0 - literals_tensor, 1, keep_dims=True)
    if default_tnorm == "yager2":
        result = tf.minimum(1.0, tf.sqrt(tf.reduce_sum(tf.square(literals_tensor), 1, keep_dims=True)))
    if default_tnorm == "luk":
        print("data aggregator is lukas")
        result = tf.minimum(1.0, tf.reduce_sum(literals_tensor, 1, keep_dims=True))
        PR(result)
    if default_tnorm == "goedel":
        result = tf.reduce_max(literals_tensor, 1, keep_dims=True, name=label)
    if default_aggregator == "product":
        return tf.reduce_prod(result, keep_dims=True)
    if default_aggregator == "mean":
        print("data aggregator is mean")
        return tf.reduce_mean(result, keep_dims=True, name=label)
    if default_aggregator == "gmean":
        return tf.exp(tf.mul(tf.reduce_sum(tf.log(result), keep_dims=True),
                             tf.inv(tf.to_float(tf.size(result)))), name=label)
    if default_aggregator == "hmean":
        print("data aggregator is hmean")
        return tf.div(tf.to_float(tf.size(result)), tf.reduce_sum(tf.inv(result), keep_dims=True))
    if default_aggregator == "min":
        print("data aggregator is min")
        return tf.reduce_min(result, keep_dims=True, name=label)
    if default_aggregator == "qmean":
        print("data aggregator is qmean")
        return tf.sqrt(tf.reduce_mean(tf.square(result), keep_dims=True), name=label)
    if default_aggregator == "cmean":
        print("data aggregator is cmean")
        return tf.pow(tf.reduce_mean(tf.pow(result, 3), keep_dims=True), tf.inv(tf.to_float(3)), name=label)
Example 12: __init__
def __init__(self, label, clauses, save_path=""):
    print("defining the knowledge base", label)
    self.label = label
    self.clauses = clauses
    self.parameters = [par for cl in self.clauses for par in cl.parameters]
    if not self.clauses:
        self.tensor = tf.constant(1.0)
    else:
        clauses_value_tensor = tf.concat(0, [cl.tensor for cl in clauses])
        if default_clauses_aggregator == "min":
            print("clauses aggregator is min")
            self.tensor = tf.reduce_min(clauses_value_tensor)
        if default_clauses_aggregator == "mean":
            print("clauses aggregator is mean")
            self.tensor = tf.reduce_mean(clauses_value_tensor)
        if default_clauses_aggregator == "hmean":
            print("clauses aggregator is hmean")
            self.tensor = tf.div(tf.to_float(tf.size(clauses_value_tensor)),
                                 tf.reduce_sum(tf.inv(clauses_value_tensor), keep_dims=True))
        if default_clauses_aggregator == "wmean":
            print("clauses aggregator is weighted mean")
            weights_tensor = tf.constant([cl.weight for cl in clauses])
            self.tensor = tf.div(tf.reduce_sum(tf.mul(weights_tensor, clauses_value_tensor)),
                                 tf.reduce_sum(weights_tensor))
    if default_positive_fact_penality != 0:
        self.loss = smooth(self.parameters) + \
                    tf.mul(default_positive_fact_penality, self.penalize_positive_facts()) - \
                    PR(self.tensor)
    else:
        self.loss = smooth(self.parameters) - PR(self.tensor)
    self.save_path = save_path
    self.train_op = train_op(self.loss, default_optimizer)
    self.saver = tf.train.Saver(max_to_keep=20)
    print("knowledge base", label, "is defined")
Example 13: get_losses
def get_losses(obj_mask):
    """Get motion constraint loss."""
    # Find height of segment.
    coords = tf.where(tf.greater(  # Shape (num_true, 2=yx)
        obj_mask[:, :, 0], tf.constant(0.5, dtype=tf.float32)))
    y_max = tf.reduce_max(coords[:, 0])
    y_min = tf.reduce_min(coords[:, 0])
    seg_height = y_max - y_min
    f_y = self.intrinsic_mat[i, 0, 1, 1]
    approx_depth = ((f_y * self.global_scale_var) /
                    tf.to_float(seg_height))
    reference_pred = tf.boolean_mask(
        depth_pred, tf.greater(
            tf.reshape(obj_mask[:, :, 0],
                       (self.img_height, self.img_width, 1)),
            tf.constant(0.5, dtype=tf.float32)))

    # Establish loss on approx_depth, a scalar, and
    # reference_pred, our dense prediction. Normalize both to
    # prevent degenerative depth shrinking.
    global_mean_depth_pred = tf.reduce_mean(depth_pred)
    reference_pred /= global_mean_depth_pred
    approx_depth /= global_mean_depth_pred
    spatial_err = tf.abs(reference_pred - approx_depth)
    mean_spatial_err = tf.reduce_mean(spatial_err)
    return mean_spatial_err
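The reduce_max/reduce_min pair above reads the vertical extent of a binary mask off the coordinates returned by tf.where. A standalone sketch with a made-up 3x2 mask:

mask = tf.constant([[0.0, 0.0],
                    [1.0, 0.0],
                    [1.0, 1.0]])
coords = tf.where(tf.greater(mask, 0.5))  # [[1, 0], [2, 0], [2, 1]]
height = tf.reduce_max(coords[:, 0]) - tf.reduce_min(coords[:, 0])  # 2 - 1 = 1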
Example 14: _summarize_vars_and_grads
def _summarize_vars_and_grads(grads_and_vars):
    tf.logging.info('Trainable variables:')
    tf.logging.info('-' * 60)
    for grad, var in grads_and_vars:
        tf.logging.info(var)

        def tag(name, v=var):
            return v.op.name + '_' + name

        # Variable summary
        mean = tf.reduce_mean(var)
        tf.summary.scalar(tag('mean'), mean)
        with tf.name_scope(tag('stddev')):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar(tag('stddev'), stddev)
        tf.summary.scalar(tag('max'), tf.reduce_max(var))
        tf.summary.scalar(tag('min'), tf.reduce_min(var))
        tf.summary.histogram(tag('histogram'), var)

        # Gradient summary
        if grad is not None:
            if isinstance(grad, tf.IndexedSlices):
                grad_values = grad.values
            else:
                grad_values = grad
            tf.summary.histogram(tag('gradient'), grad_values)
            tf.summary.scalar(tag('gradient_norm'), tf.global_norm([grad_values]))
        else:
            tf.logging.info('Var %s has no gradient', var.op.name)
Example 15: histogram
def histogram(self, x, value_range=None, nbins=None, name=None):
    """Return histogram of values.

    Given the tensor `x`, this operation returns a rank 1 histogram
    counting the number of entries in `x` that fell into every bin. The
    bins are equal width and determined by the arguments `value_range` and
    `nbins`.

    Args:
      x: 1D numeric `Tensor` of items to count.
      value_range: Shape [2] `Tensor`. `values <= value_range[0]` will be
        mapped to `hist[0]`, `values >= value_range[1]` will be mapped to
        `hist[-1]`. Must be same dtype as `x`.
      nbins: Scalar `int32 Tensor`. Number of histogram bins.
      name: Python `str` name prefixed to Ops created by this class.

    Returns:
      counts: 1D `Tensor` of counts, i.e.,
        `counts[i] = sum{ edges[i-1] <= values[j] < edges[i] : j }`.
      edges: 1D `Tensor` characterizing intervals used for counting.
    """
    with tf.name_scope(name, "histogram", [x]):
        x = tf.convert_to_tensor(x, name="x")
        if value_range is None:
            value_range = [tf.reduce_min(x), 1 + tf.reduce_max(x)]
        value_range = tf.convert_to_tensor(value_range, name="value_range")
        lo = value_range[0]
        hi = value_range[1]
        if nbins is None:
            nbins = tf.to_int32(hi - lo)
        delta = (hi - lo) / tf.cast(nbins, dtype=value_range.dtype.base_dtype)
        edges = tf.range(
            start=lo, limit=hi, delta=delta, dtype=x.dtype.base_dtype)
        counts = tf.histogram_fixed_width(x, value_range=value_range, nbins=nbins)
        return counts, edges
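A usage sketch, with `hist` standing in for whatever object defines the method above and made-up data; note that tf.reduce_min and tf.reduce_max supply the default range when `value_range` is omitted:

counts, edges = hist.histogram(tf.constant([0.0, 1.0, 1.0, 2.0]),
                               value_range=[0.0, 3.0], nbins=3)
# counts -> [1, 2, 1], edges -> [0.0, 1.0, 2.0]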