This page collects typical usage examples of the Python function tensorflow.python.platform.tf_logging.vlog. If you are unsure what vlog does, how to call it, or would like to see it used in real code, the curated examples below should help.
Fifteen code examples of the vlog function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
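Before the examples, here is a minimal sketch of how vlog output is usually surfaced. It reflects an assumption about the TensorFlow versions these snippets come from: vlog forwards the message to the module's Python logger at the given numeric level, so the verbosity threshold has to be lowered (for example with set_verbosity) before the messages appear.

from tensorflow.python.platform import tf_logging as logging

# Lower the threshold so low-numbered vlog records are not filtered out
# (assumption: vlog(level, ...) logs at the raw numeric level `level`).
logging.set_verbosity(1)

# vlog uses printf-style formatting, just like logging.info / logging.warning.
logging.vlog(1, "registering op %s", "MyOp")
logging.vlog(2, "batch=%d features=%d", 32, 512)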
Example 1: benchmarkSamplingMVNDiag
def benchmarkSamplingMVNDiag(self):
  logging.vlog(
      2, "mvn_diag\tuse_gpu\tcomponents\tbatch\tfeatures\tsample\twall_time")

  def create_distribution(batch_size, num_components, num_features):
    cat = ds.Categorical(
        logits=np.random.randn(batch_size, num_components))
    mus = [
        variables.Variable(np.random.randn(batch_size, num_features))
        for _ in range(num_components)
    ]
    sigmas = [
        variables.Variable(np.random.rand(batch_size, num_features))
        for _ in range(num_components)
    ]
    components = list(
        ds.MultivariateNormalDiag(
            loc=mu, scale_diag=sigma) for (mu, sigma) in zip(mus, sigmas))
    return ds.Mixture(cat, components, use_static_graph=self.use_static_graph)

  for use_gpu in False, True:
    if use_gpu and not test.is_gpu_available():
      continue
    for num_components in 1, 8, 16:
      for batch_size in 1, 32:
        for num_features in 1, 64, 512:
          for sample_size in 1, 32, 128:
            self._runSamplingBenchmark(
                "mvn_diag",
                create_distribution=create_distribution,
                use_gpu=use_gpu,
                num_components=num_components,
                batch_size=batch_size,
                num_features=num_features,
                sample_size=sample_size)
Example 2: _padding_size_conv_pool
def _padding_size_conv_pool(node, kernel_size, stride, input_resolution=None):
  """Computes padding size given a TF convolution or pooling node.

  Args:
    node: Tensorflow node (NodeDef proto).
    kernel_size: Kernel size of node (integer).
    stride: Stride size of node (integer).
    input_resolution: Input resolution to assume, if not None (integer).

  Returns:
    total_padding: Total padding size (integer).
    padding: Padding size, applied to the left or top (integer).

  Raises:
    ValueError: If padding is invalid.
  """
  # In this case, we need to carefully consider the different TF padding modes.
  # The padding depends on kernel size, and may depend on input size. If it
  # depends on input size and input_resolution is None, we raise an exception.
  padding_attr = node.attr["padding"]
  logging.vlog(4, "padding_attr = %s", padding_attr)
  if padding_attr.s in _VALID_PADDING:
    total_padding = 0
    padding = 0
  elif padding_attr.s in _SAME_PADDING:
    if input_resolution is None:
      # In this case, we do not know the input resolution, so we can only know
      # the padding in some special cases.
      if kernel_size == 1:
        total_padding = 0
        padding = 0
      elif stride == 1:
        total_padding = kernel_size - 1
        padding = int(math.floor(float(total_padding) / 2))
      elif stride == 2 and kernel_size % 2 == 0:
        # In this case, we can be sure of the left/top padding, but not of the
        # total padding.
        total_padding = None
        padding = int(math.floor((float(kernel_size) - 1) / 2))
      else:
        total_padding = None
        padding = None
        logging.warning(
            "Padding depends on input size, which means that the effective "
            "padding may be different depending on the input image "
            "dimensionality. In this case, alignment check will be skipped. If"
            " you know the input resolution, please set it.")
    else:
      # First, compute total_padding based on documentation.
      if input_resolution % stride == 0:
        total_padding = int(max(float(kernel_size - stride), 0.0))
      else:
        total_padding = int(
            max(float(kernel_size - (input_resolution % stride)), 0.0))
      # Then, compute left/top padding.
      padding = int(math.floor(float(total_padding) / 2))
  else:
    raise ValueError("Invalid padding operation %s" % padding_attr.s)
  return total_padding, padding
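To make the SAME branch above concrete, here is a small standalone recomputation of those formulas for a hypothetical 3x3 kernel with stride 2 on a 224-pixel input (plain Python, no TensorFlow needed):

import math

kernel_size, stride, input_resolution = 3, 2, 224  # hypothetical values
if input_resolution % stride == 0:
  total_padding = int(max(float(kernel_size - stride), 0.0))
else:
  total_padding = int(
      max(float(kernel_size - (input_resolution % stride)), 0.0))
padding = int(math.floor(float(total_padding) / 2))
print(total_padding, padding)  # -> 1 0: one padded pixel in total, none on the left/top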
Example 3: _get_layer_params
def _get_layer_params(node, name_to_order_node):
  """Gets layer parameters relevant for RF computation.

  Currently, only these nodes are supported:
  - Conv2D
  - DepthwiseConv2dNative
  - Pad
  - MaxPool
  - AvgPool
  - all nodes listed in _UNCHANGED_RF_LAYER_OPS

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_order_node: Map from name to {order, node}. Output of
      graph_compute_order.get_compute_order().

  Returns:
    kernel_size_x: Kernel size for horizontal direction (integer).
    kernel_size_y: Kernel size for vertical direction (integer).
    stride_x: Stride size for horizontal direction (integer).
    stride_y: Stride size for vertical direction (integer).
    padding_x: Padding size for horizontal direction (integer).
    padding_y: Padding size for vertical direction (integer).

  Raises:
    ValueError: If layer op is unknown.
  """
  logging.vlog(3, "node.op = %s", node.op)
  logging.vlog(4, "node = %s", node)
  if node.op == "Conv2D" or node.op == "DepthwiseConv2dNative":
    stride_x, stride_y = _stride_size(node)
    kernel_size_x, kernel_size_y = _conv_kernel_size(node, name_to_order_node)
    # Compute the padding for this node separately for each direction.
    padding_x = _padding_size_conv_pool(node, kernel_size_x, stride_x)
    padding_y = _padding_size_conv_pool(node, kernel_size_y, stride_y)
  elif node.op == "Pad":
    # Kernel and stride are simply 1 in this case.
    kernel_size_x = 1
    kernel_size_y = 1
    stride_x = 1
    stride_y = 1
    padding_x, padding_y = _padding_size_pad_layer(node, name_to_order_node)
  elif node.op == "MaxPool" or node.op == "AvgPool":
    stride_x, stride_y = _stride_size(node)
    kernel_size_x, kernel_size_y = _pool_kernel_size(node)
    # Compute the padding for this node separately for each direction.
    padding_x = _padding_size_conv_pool(node, kernel_size_x, stride_x)
    padding_y = _padding_size_conv_pool(node, kernel_size_y, stride_y)
  elif node.op in _UNCHANGED_RF_LAYER_OPS:
    # These nodes do not modify the RF parameters.
    kernel_size_x = 1
    kernel_size_y = 1
    stride_x = 1
    stride_y = 1
    padding_x = 0
    padding_y = 0
  else:
    raise ValueError("Unknown layer for operation '%s': %s" % (node.name,
                                                               node.op))
  return kernel_size_x, kernel_size_y, stride_x, stride_y, padding_x, padding_y
Example 4: _conv_kernel_size
def _conv_kernel_size(node, name_to_order_node):
  """Computes kernel size given a TF convolution or pooling node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_order_node: Map from name to {order, node}. Output of
      graph_compute_order.get_compute_order().

  Returns:
    kernel_size_x: Kernel size for horizontal direction (integer).
    kernel_size_y: Kernel size for vertical direction (integer).

  Raises:
    ValueError: If the weight layer node is invalid.
  """
  weights_layer_read_name = node.input[1]
  if not weights_layer_read_name.endswith("/read"):
    raise ValueError(
        "Weight layer's name input to conv layer does not end with '/read'")
  weights_layer_param_name = weights_layer_read_name[:-5]
  weights_node = name_to_order_node[weights_layer_param_name].node
  if weights_node.op != "VariableV2":
    raise ValueError("Weight layer is not of type VariableV2")
  shape = weights_node.attr["shape"]
  logging.vlog(4, "weight shape = %s", shape)
  kernel_size_y = shape.shape.dim[0].size
  kernel_size_x = shape.shape.dim[1].size
  return kernel_size_x, kernel_size_y
Example 5: testImplicitLargeDiag
def testImplicitLargeDiag(self):
  mu = np.array([[1., 2, 3],
                 [11, 22, 33]])      # shape: [b, k] = [2, 3]
  u = np.array([[[1., 2],
                 [3, 4],
                 [5, 6]],
                [[0.5, 0.75],
                 [1, 0.25],
                 [1.5, 1.25]]])      # shape: [b, k, r] = [2, 3, 2]
  m = np.array([[0.1, 0.2],
                [0.4, 0.5]])         # shape: [b, r] = [2, 2]
  scale = np.stack([
      np.eye(3) + np.matmul(np.matmul(u[0], np.diag(m[0])),
                            np.transpose(u[0])),
      np.eye(3) + np.matmul(np.matmul(u[1], np.diag(m[1])),
                            np.transpose(u[1])),
  ])
  cov = np.stack([np.matmul(scale[0], scale[0].T),
                  np.matmul(scale[1], scale[1].T)])
  logging.vlog(2, "expected_cov:\n{}".format(cov))
  with self.test_session():
    mvn = ds.MultivariateNormalDiagPlusLowRank(
        loc=mu,
        scale_perturb_factor=u,
        scale_perturb_diag=m)
    self.assertAllClose(cov, mvn.covariance().eval(), atol=0., rtol=1e-6)
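The construction the test checks can be restated in a few lines of plain NumPy, shown here with the first batch entry's values from above (independent of TensorFlow): the scale matrix is the identity plus the low-rank update U diag(m) U^T, and the covariance is scale @ scale^T.

import numpy as np

u = np.array([[1., 2], [3, 4], [5, 6]])   # first batch entry of `u` above
m = np.array([0.1, 0.2])                  # first batch entry of `m` above
scale = np.eye(3) + np.matmul(np.matmul(u, np.diag(m)), u.T)
cov = np.matmul(scale, scale.T)
print(np.allclose(cov, cov.T))            # True: symmetric by construction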
Example 6: _time_performance_run_normal_lstm
def _time_performance_run_normal_lstm(
    self, test_config, x_train, y_train):
  # Get performance number for standard LSTM on GPU.
  input_shape = test_config['input_shape']
  rnn_state_size = test_config['rnn_state_size']
  timestep = test_config['timestep']
  epoch = test_config['epoch']
  warmup_epoch = test_config['warmup_epoch']

  ops.reset_default_graph()
  with self.test_session(use_gpu=True):
    layer = keras.layers.LSTM(rnn_state_size)
    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)

    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('sgd', 'mse')

    total_duration = 0
    for i in range(epoch):
      start_time = time.time()
      model.fit(x_train, y_train)
      end_time = time.time()
      if i >= warmup_epoch:
        duration_per_epoch = end_time - start_time
        total_duration += duration_per_epoch
        logging.vlog(2, '%s: Time consumed for epoch %d is: %s',
                     'Normal LSTM', i, duration_per_epoch)
    logging.info('Average performance for %s per epoch is: %s',
                 'Normal LSTM', (total_duration / epoch))
    return total_duration / epoch
Example 7: register
def register(self, candidate, name=None):
  """Registers a Python object "candidate" for the given "name".

  Args:
    candidate: The candidate object to add to the registry.
    name: An optional string specifying the registry key for the candidate.
      If None, candidate.__name__ will be used.

  Raises:
    KeyError: If same name is used twice.
  """
  if not name:
    name = candidate.__name__
  if name in self._registry:
    (filename, line_number, function_name, _) = (
        self._registry[name][_LOCATION_TAG])
    raise KeyError("Registering two %s with name '%s'! "
                   "(Previous registration was in %s %s:%d)" %
                   (self._name, name, function_name, filename, line_number))

  logging.vlog(1, "Registering %s (%s) in %s.", name, candidate, self._name)
  # stack trace is [this_function, Register(), user_function,...]
  # so the user function is #2.
  stack = tf_stack.extract_stack()
  stack_index = min(2, len(stack)-1)
  if stack_index >= 0:
    user_function = stack[stack_index]
    location_tag = tf_stack.convert_stack([user_function])[0]
  else:
    location_tag = "UNKNOWN"
  self._registry[name] = {_TYPE_TAG: candidate, _LOCATION_TAG: location_tag}
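A hedged usage sketch of this method, assuming it belongs to TensorFlow's internal registry.Registry helper (which also exposes a lookup counterpart); enabling vlog level 1 as in the introductory sketch surfaces the "Registering ..." message:

from tensorflow.python.framework import registry
from tensorflow.python.platform import tf_logging as logging

logging.set_verbosity(1)                     # surface the vlog(1, ...) call in register()
my_registry = registry.Registry("my_functions")

def my_fn():
  return "hello"

my_registry.register(my_fn)                  # key defaults to candidate.__name__
assert my_registry.lookup("my_fn") is my_fn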
Example 8: _integrator_conserves_energy
def _integrator_conserves_energy(self, x, event_dims, sess,
                                 feed_dict=None):
  def potential_and_grad(x):
    log_prob, grad = self._log_gamma_log_prob_grad(x, event_dims)
    return -log_prob, -grad

  step_size = array_ops.placeholder(np.float32, [], name='step_size')
  hmc_lf_steps = array_ops.placeholder(np.int32, [], name='hmc_lf_steps')

  if feed_dict is None:
    feed_dict = {}
  feed_dict[hmc_lf_steps] = 1000

  m = random_ops.random_normal(array_ops.shape(x))
  potential_0, grad_0 = potential_and_grad(x)
  old_energy = potential_0 + 0.5 * math_ops.reduce_sum(m * m,
                                                       event_dims)
  _, new_m, potential_1, _ = (
      hmc.leapfrog_integrator(step_size, hmc_lf_steps, x,
                              m, potential_and_grad, grad_0))
  new_energy = potential_1 + 0.5 * math_ops.reduce_sum(new_m * new_m,
                                                       event_dims)
  x_shape = sess.run(x, feed_dict).shape
  n_event_dims = self._n_event_dims(x_shape, event_dims)
  feed_dict[step_size] = 0.1 / n_event_dims
  old_energy_val, new_energy_val = sess.run([old_energy, new_energy],
                                            feed_dict)
  logging.vlog(1, 'average energy change: {}'.format(
      abs(old_energy_val - new_energy_val).mean()))

  self.assertAllEqual(np.ones_like(new_energy_val, dtype=np.bool),
                      abs(old_energy_val - new_energy_val) < 1.)
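For readers unfamiliar with the property being tested, here is a tiny NumPy-only leapfrog integrator for a 1-D standard normal potential (illustrative only, not TF's hmc.leapfrog_integrator): the Hamiltonian potential(x) + 0.5 * m**2 stays nearly constant even after many steps.

import numpy as np

def potential_and_grad(x):
  return 0.5 * x ** 2, x          # -log N(0, 1) up to a constant, and its gradient

x, m, step_size = 1.0, 0.5, 0.1
pot, grad = potential_and_grad(x)
old_energy = pot + 0.5 * m ** 2
for _ in range(1000):
  m -= 0.5 * step_size * grad     # half step on the momentum
  x += step_size * m              # full step on the position
  pot, grad = potential_and_grad(x)
  m -= 0.5 * step_size * grad     # second half step on the momentum
new_energy = pot + 0.5 * m ** 2
print(abs(new_energy - old_energy))   # stays small: energy is approximately conserved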
Example 9: _time_performance_run_unifed_lstm_gpu
def _time_performance_run_unifed_lstm_gpu(
    self, test_config, x_train, y_train):
  # Get performance number for Unified_LSTM with grappler swap the impl
  input_shape = test_config['input_shape']
  rnn_state_size = test_config['rnn_state_size']
  timestep = test_config['timestep']
  epoch = test_config['epoch']
  warmup_epoch = test_config['warmup_epoch']

  ops.reset_default_graph()
  K.set_session(session.Session(config=self.config))
  layer = UnifiedLSTM(rnn_state_size)
  inputs = keras.layers.Input(
      shape=[timestep, input_shape], dtype=dtypes.float32)

  outputs, _ = layer(inputs)
  model = keras.models.Model(inputs, outputs)
  model.compile('sgd', 'mse')

  total_duration = 0
  for i in range(epoch):
    start_time = time.time()
    model.fit(x_train, y_train)
    end_time = time.time()
    if i >= warmup_epoch:
      duration_per_epoch = end_time - start_time
      total_duration += duration_per_epoch
      logging.vlog(2, '%s: Time consumed for epoch %d is: %s',
                   'Unified LSTM', i, duration_per_epoch)
  logging.info('Average performance for %s per epoch is: %s',
               'Unified LSTM', (total_duration / epoch))
  return total_duration / epoch
Example 10: _ais_gets_correct_log_normalizer
def _ais_gets_correct_log_normalizer(self, init, event_dims, sess,
                                     feed_dict=None):
  def proposal_log_prob(x):
    return math_ops.reduce_sum(-0.5 * x * x - 0.5 * np.log(2*np.pi),
                               event_dims)

  def target_log_prob(x):
    return self._log_gamma_log_prob(x, event_dims)

  if feed_dict is None:
    feed_dict = {}

  w, _, _ = hmc.ais_chain(200, 0.5, 2, init, target_log_prob,
                          proposal_log_prob, event_dims)

  w_val = sess.run(w, feed_dict)
  init_shape = sess.run(init, feed_dict).shape
  normalizer_multiplier = np.prod([init_shape[i] for i in event_dims])

  true_normalizer = -self._shape_param * np.log(self._rate_param)
  true_normalizer += special.gammaln(self._shape_param)
  true_normalizer *= normalizer_multiplier

  n_weights = np.prod(w_val.shape)
  normalized_w = np.exp(w_val - true_normalizer)
  standard_error = np.std(normalized_w) / np.sqrt(n_weights)
  logging.vlog(1, 'True normalizer {}, estimated {}, n_weights {}'.format(
      true_normalizer, np.log(normalized_w.mean()) + true_normalizer,
      n_weights))
  self.assertNear(normalized_w.mean(), 1.0, 4.0 * standard_error)
Example 11: _chain_gets_correct_expectations
def _chain_gets_correct_expectations(self, x, event_dims, sess,
                                     feed_dict=None):
  def log_gamma_log_prob(x):
    return self._log_gamma_log_prob(x, event_dims)

  step_size = array_ops.placeholder(np.float32, [], name='step_size')
  hmc_lf_steps = array_ops.placeholder(np.int32, [], name='hmc_lf_steps')
  hmc_n_steps = array_ops.placeholder(np.int32, [], name='hmc_n_steps')

  if feed_dict is None:
    feed_dict = {}
  feed_dict.update({step_size: 0.1,
                    hmc_lf_steps: 2,
                    hmc_n_steps: 300})

  sample_chain, acceptance_prob_chain = hmc.chain([hmc_n_steps],
                                                  step_size,
                                                  hmc_lf_steps,
                                                  x, log_gamma_log_prob,
                                                  event_dims)

  acceptance_probs, samples = sess.run([acceptance_prob_chain, sample_chain],
                                       feed_dict)
  samples = samples[feed_dict[hmc_n_steps] // 2:]

  expected_x_est = samples.mean()
  expected_exp_x_est = np.exp(samples).mean()

  logging.vlog(1, 'True E[x, exp(x)]: {}\t{}'.format(
      self._expected_x, self._expected_exp_x))
  logging.vlog(1, 'Estimated E[x, exp(x)]: {}\t{}'.format(
      expected_x_est, expected_exp_x_est))

  self.assertNear(expected_x_est, self._expected_x, 2e-2)
  self.assertNear(expected_exp_x_est, self._expected_exp_x, 2e-2)
  self.assertTrue((acceptance_probs > 0.5).all())
  self.assertTrue((acceptance_probs <= 1.0).all())
Example 12: _stride_size
def _stride_size(node, name_to_node):
  """Computes stride size given a TF node.

  Args:
    node: Tensorflow node (NodeDef proto).
    name_to_node: Map from node name to NodeDef; used to resolve the strides
      input of MaxPoolV2 nodes.

  Returns:
    stride_x: Stride size for horizontal direction (integer).
    stride_y: Stride size for vertical direction (integer).

  Raises:
    ValueError: If the strides input name is invalid.
  """
  if node.op == "MaxPoolV2":
    strides_input_name = node.input[2]
    if not strides_input_name.endswith("/strides"):
      raise ValueError("Strides name does not end with '/strides'")
    strides_node = name_to_node[strides_input_name]
    value = strides_node.attr["value"]
    t = make_ndarray(value.tensor)
    stride_y = t[1]
    stride_x = t[2]
  else:
    strides_attr = node.attr["strides"]
    logging.vlog(4, "strides_attr = %s", strides_attr)
    stride_y = strides_attr.list.i[1]
    stride_x = strides_attr.list.i[2]
  return stride_x, stride_y
Example 13: _runSamplingBenchmark
def _runSamplingBenchmark(self, name, create_distribution, use_gpu,
                          num_components, batch_size, num_features,
                          sample_size):
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  np.random.seed(127)
  with session.Session(config=config, graph=ops.Graph()) as sess:
    random_seed.set_random_seed(0)
    with ops.device("/device:GPU:0" if use_gpu else "/cpu:0"):
      mixture = create_distribution(
          num_components=num_components,
          batch_size=batch_size,
          num_features=num_features)
      sample_op = mixture.sample(sample_size).op
      sess.run(variables.global_variables_initializer())
      reported = self.run_op_benchmark(
          sess,
          sample_op,
          min_iters=10,
          name=("%s_%s_components_%d_batch_%d_features_%d_sample_%d" %
                (name, use_gpu, num_components, batch_size, num_features,
                 sample_size)))
      logging.vlog(2, "\t".join(["%s", "%d", "%d", "%d", "%d", "%g"]) % (
          use_gpu, num_components, batch_size, num_features, sample_size,
          reported["wall_time"]))
Example 14: _compute_numeric_jacobian
def _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta,
                              extra_feed_dict):
  """Computes the numeric Jacobian for dy/dx.

  Computes the numeric Jacobian by slightly perturbing the inputs and
  measuring the differences on the output.

  Args:
    x: the tensor "x".
    x_shape: the dimensions of x as a tuple or an array of ints.
    x_data: a numpy array as the input data for x
    y: the tensor "y".
    y_shape: the dimensions of y as a tuple or an array of ints.
    delta: the amount of perturbation we give to the input
    extra_feed_dict: dict that allows fixing specified tensor values
      during the jacobian calculation.

  Returns:
    A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
    and "y_size" columns where "x_size" is the number of elements in x and
    "y_size" is the number of elements in y.
  """
  # bfloat16 doesn't have enough bits to represent high precision numbers such
  # as delta. Convert to float32 here. Since numeric_jacobian is expected to
  # be the groundtruth to compare against, it shouldn't lose any information.
  if x.dtype == dtypes.bfloat16:
    x = math_ops.cast(x, dtypes.float32)  # TODO(wangpeng): Now that the new x
    # is an output of the old x, isn't feeding to the new x a mistake?
  if y.dtype == dtypes.bfloat16:
    y = math_ops.cast(y, dtypes.float32)
  if x_data.dtype == dtypes.bfloat16.as_numpy_dtype:
    x_data = x_data.astype(np.float32)

  # To compute the jacobian, we treat x and y as one-dimensional vectors
  x_size = _product(x_shape) * (2 if x.dtype.is_complex else 1)
  y_size = _product(y_shape) * (2 if y.dtype.is_complex else 1)
  x_dtype = x.dtype.real_dtype.as_numpy_dtype
  y_dtype = y.dtype.real_dtype.as_numpy_dtype

  # Make sure we have the right types
  x_data = np.asarray(x_data, dtype=x.dtype.as_numpy_dtype)
  scale = np.asarray(2 * delta, dtype=y_dtype)[()]

  jacobian = np.zeros((x_size, y_size), dtype=x_dtype)

  # For each of the entry of x, we slightly perturbs this by adding and
  # subtracting a delta and then compute difference between the outputs. This
  # will give us one row of the Jacobian matrix.
  for row in range(x_size):
    x_pos = x_data.copy()
    x_neg = x_data.copy()
    x_pos.ravel().view(x_dtype)[row] += delta
    y_pos = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_pos}))
    x_neg.ravel().view(x_dtype)[row] -= delta
    y_neg = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_neg}))
    diff = (y_pos - y_neg) / scale
    jacobian[row, :] = diff.ravel().view(y_dtype)

  logging.vlog(1, "Numeric Jacobian =\n%s", jacobian)
  return jacobian
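The central-difference idea behind this helper can be illustrated without TensorFlow. The following standalone sketch (a hypothetical helper, pure NumPy) estimates the Jacobian of np.sin, which should come out close to diag(cos(x)):

import numpy as np

def numeric_jacobian(f, x, delta=1e-3):
  x = np.asarray(x, dtype=np.float64)
  y = np.asarray(f(x))
  jacobian = np.zeros((x.size, y.size))
  for row in range(x.size):
    x_pos, x_neg = x.copy(), x.copy()
    x_pos.ravel()[row] += delta      # perturb one input element up...
    x_neg.ravel()[row] -= delta      # ...and down
    diff = (np.asarray(f(x_pos)) - np.asarray(f(x_neg))) / (2 * delta)
    jacobian[row, :] = diff.ravel()  # one row of the Jacobian
  return jacobian

print(numeric_jacobian(np.sin, [0.0, 0.5, 1.0]))  # approximately diag(cos([0, 0.5, 1]))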
Example 15: _chain_gets_correct_expectations
def _chain_gets_correct_expectations(self, x, independent_chain_ndims,
                                     sess, feed_dict=None):
  counter = collections.Counter()

  def log_gamma_log_prob(x):
    counter["target_calls"] += 1
    event_dims = math_ops.range(independent_chain_ndims,
                                array_ops.rank(x))
    return self._log_gamma_log_prob(x, event_dims)

  num_results = array_ops.placeholder(
      np.int32, [], name="num_results")
  step_size = array_ops.placeholder(
      np.float32, [], name="step_size")
  num_leapfrog_steps = array_ops.placeholder(
      np.int32, [], name="num_leapfrog_steps")

  if feed_dict is None:
    feed_dict = {}
  feed_dict.update({num_results: 150,
                    step_size: 0.05,
                    num_leapfrog_steps: 2})

  samples, kernel_results = hmc.sample_chain(
      num_results=num_results,
      target_log_prob_fn=log_gamma_log_prob,
      current_state=x,
      step_size=step_size,
      num_leapfrog_steps=num_leapfrog_steps,
      num_burnin_steps=150,
      seed=42)

  self.assertAllEqual(dict(target_calls=2), counter)

  expected_x = (math_ops.digamma(self._shape_param)
                - np.log(self._rate_param))
  expected_exp_x = self._shape_param / self._rate_param

  log_accept_ratio_, samples_, expected_x_ = sess.run(
      [kernel_results.log_accept_ratio, samples, expected_x],
      feed_dict)

  actual_x = samples_.mean()
  actual_exp_x = np.exp(samples_).mean()
  acceptance_probs = np.exp(np.minimum(log_accept_ratio_, 0.))

  logging_ops.vlog(1, "True E[x, exp(x)]: {}\t{}".format(
      expected_x_, expected_exp_x))
  logging_ops.vlog(1, "Estimated E[x, exp(x)]: {}\t{}".format(
      actual_x, actual_exp_x))

  self.assertNear(actual_x, expected_x_, 2e-2)
  self.assertNear(actual_exp_x, expected_exp_x, 2e-2)
  self.assertAllEqual(np.ones_like(acceptance_probs, np.bool),
                      acceptance_probs > 0.5)
  self.assertAllEqual(np.ones_like(acceptance_probs, np.bool),
                      acceptance_probs <= 1.)
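The expected values used in the assertions follow from standard Gamma facts: if X = log(G) with G ~ Gamma(shape, rate), then E[X] = digamma(shape) - log(rate) and E[exp(X)] = E[G] = shape / rate. A quick Monte Carlo sanity check with hypothetical parameters shape=2, rate=3 (note that NumPy's gamma sampler takes a scale, i.e. 1/rate):

import numpy as np
from scipy import special

shape_param, rate_param = 2.0, 3.0                        # hypothetical parameters
g = np.random.gamma(shape_param, 1.0 / rate_param, size=200000)
print(np.log(g).mean(), special.digamma(shape_param) - np.log(rate_param))
print(g.mean(), shape_param / rate_param)                 # both pairs should nearly agree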