This article collects typical usage examples of the warn function from tensorflow.python.platform.tf_logging in Python. If you have been wondering what warn does, how to call it, or what real-world uses look like, the curated code examples below should help.
The following 15 code examples of the warn function are shown, sorted by popularity by default.
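Before diving into the examples, a minimal sketch of calling warn directly may be useful. Note that tf_logging is an internal TensorFlow module (in TF 1.x the public alias was tf.logging), so the import path below may change between releases; the messages and arguments are illustrative.

# A minimal sketch, assuming a TF 1.x-era installation; warn() is a thin
# wrapper around Python's standard logging module.
from tensorflow.python.platform import tf_logging

# Both eager %-interpolation and lazy argument passing are accepted:
tf_logging.warn("Cannot test moments: %s" % "scipy not installed")
tf_logging.warn("Can not delete %s recursively: %s", "/tmp/export", "not found")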
Example 1: _testMoments
def _testMoments(self, dt):
  try:
    from scipy import stats  # pylint: disable=g-import-not-at-top
  except ImportError as e:
    tf_logging.warn("Cannot test moments: %s" % e)
    return

  # The moments test is a z-value test. This is the largest z-value
  # we want to tolerate. Since the z-test approximates a unit normal
  # distribution, it should almost definitely never exceed 6.
  z_limit = 6.0

  for stride in 0, 1, 4, 17:
    alphas = [0.2, 1.0, 3.0]
    if dt == dtypes.float64:
      alphas = [0.01] + alphas
    for alpha in alphas:
      for scale in 9, 17:
        # Gamma moments only defined for values less than the scale param.
        max_moment = min(6, scale // 2)
        sampler = self._Sampler(
            20000, alpha, 1 / scale, dt, use_gpu=False, seed=12345)
        z_scores = util.test_moment_matching(
            sampler(),
            max_moment,
            stats.gamma(alpha, scale=scale),
            stride=stride,
        )
        self.assertAllLess(z_scores, z_limit)
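The moment-matching idea behind this test can be sketched outside the harness. The snippet below, assuming SciPy and illustrative parameters, compares raw sample moments of gamma draws against the analytic moments that util.test_moment_matching turns into z-scores.

# A standalone sketch, assuming SciPy; alpha and scale are illustrative.
import numpy as np
from scipy import stats

alpha, scale = 3.0, 9.0
dist = stats.gamma(alpha, scale=scale)
samples = dist.rvs(20000, random_state=12345)
for k in (1, 2, 3):
  print(k, np.mean(samples**k), dist.moment(k))  # sample vs. analytic moment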
Example 2: train
def train(self, delay_secs=None):
  """Fit the estimator using the training data.

  Train the estimator for `self._train_steps` steps, after waiting for
  `delay_secs` seconds. If `self._train_steps` is `None`, train forever.

  Args:
    delay_secs: Start training after this many seconds.

  Returns:
    The trained estimator.
  """
  start = time.time()

  # Start the server, if needed. It's important to start the server before
  # we (optionally) sleep for the case where no device_filters are set.
  # Otherwise, the servers will wait to connect to each other before starting
  # to train. We might as well start as soon as we can.
  config = self._estimator.config
  if isinstance(config, run_config.RunConfig):
    if (config.cluster_spec and config.master and
        config.environment == run_config.Environment.LOCAL):
      logging.warn("ClusterSpec and master are provided, but environment is "
                   "set to 'local'. Set environment to 'cloud' if you intend "
                   "to use the distributed runtime.")
    if (config.environment != run_config.Environment.LOCAL and
        config.environment != run_config.Environment.GOOGLE and
        config.cluster_spec and config.master):
      self._start_server()
  elif config.cluster_spec and config.master:
    raise ValueError(
        "For distributed runtime, Experiment class only works with "
        "tf.contrib.learn.RunConfig for now, but provided {}".format(
            type(config)))

  extra_hooks = []
  if delay_secs is None:
    task_id = self._estimator.config.task_id or 0
    if self._delay_workers_by_global_step:
      # Wait 5500 global steps for the second worker. Each worker waits more
      # than the previous one, but with a diminishing number of steps.
      extra_hooks.append(
          basic_session_run_hooks.GlobalStepWaiterHook(
              int(8000.0 * math.log(task_id + 1))))
      delay_secs = 0
    else:
      # Wait 5 secs more for each new worker up to 60 secs.
      delay_secs = min(60, task_id * 5)

  if delay_secs > 0:
    elapsed_secs = time.time() - start
    remaining = delay_secs - elapsed_secs
    logging.info("Waiting %d secs before starting training.", remaining)
    time.sleep(delay_secs)

  return self._call_train(
      input_fn=self._train_input_fn,
      max_steps=self._train_steps,
      hooks=self._train_monitors + extra_hooks,
      saving_listeners=self._saving_listeners)
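The two delay schedules used above are easy to tabulate in isolation. A standalone sketch (task ids are illustrative):

# A standalone sketch of the worker-delay schedules from the code above.
import math

for task_id in range(5):
  step_delay = int(8000.0 * math.log(task_id + 1))  # global-step waiter
  secs_delay = min(60, task_id * 5)                 # wall-clock sleep
  print(task_id, step_delay, secs_delay)
# task_id 1 gives int(8000 * ln 2) = 5545 steps, the "5500" in the comment.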
Example 3: surrogate_loss
def surrogate_loss(sample_losses,
                   stochastic_tensors=None,
                   name="SurrogateLoss"):
  """Surrogate loss for stochastic graphs.

  This function will call `loss_fn` on each `StochasticTensor`
  upstream of `sample_losses`, passing the losses that it influenced.

  Note that currently `surrogate_loss` does not work with `StochasticTensor`s
  instantiated in `while_loop`s or other control structures.

  Args:
    sample_losses: a list or tuple of final losses. Each loss should be per
      example in the batch (and possibly per sample); that is, it should have
      dimensionality of 1 or greater. All losses should have the same shape.
    stochastic_tensors: a list of `StochasticTensor`s to add loss terms for.
      If None, defaults to all `StochasticTensor`s in the graph upstream of
      the `Tensor`s in `sample_losses`.
    name: the name with which to prepend created ops.

  Returns:
    `Tensor` loss, which is the sum of `sample_losses` and the
    `loss_fn`s returned by the `StochasticTensor`s.

  Raises:
    TypeError: if `sample_losses` is not a list or tuple, or if its elements
      are not `Tensor`s.
    ValueError: if any loss in `sample_losses` does not have dimensionality 1
      or greater.
  """
  with ops.op_scope(sample_losses, name):
    fixed_losses = []
    if not isinstance(sample_losses, (list, tuple)):
      raise TypeError("sample_losses must be a list or tuple")
    for loss in sample_losses:
      if not isinstance(loss, ops.Tensor):
        raise TypeError("loss is not a Tensor: %s" % loss)
      ndims = loss.get_shape().ndims
      if not (ndims is not None and ndims >= 1):
        raise ValueError("loss must have dimensionality 1 or greater: %s" %
                         loss)
      fixed_losses.append(array_ops.stop_gradient(loss))

    stoch_dependencies_map = _stochastic_dependencies_map(
        fixed_losses, stochastic_tensors=stochastic_tensors)
    if not stoch_dependencies_map:
      logging.warn(
          "No collection of Stochastic Tensors found for current graph.")
      return math_ops.add_n(sample_losses)

    # Iterate through all of the stochastic dependencies, adding
    # surrogate terms where necessary.
    sample_losses = [ops.convert_to_tensor(loss) for loss in sample_losses]
    loss_terms = sample_losses
    for (stoch_node, dependent_losses) in stoch_dependencies_map.items():
      loss_term = stoch_node.loss(list(dependent_losses))
      if loss_term is not None:
        loss_terms.append(loss_term)

    return math_ops.add_n(loss_terms)
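For intuition, the loss term a StochasticTensor typically contributes is a score-function surrogate: the log-probability of its sample weighted by the gradient-stopped downstream loss. The sketch below is a hedged illustration of that idea in plain TF 1.x, not the contrib API itself; the distribution and variable names are assumptions.

# A hedged sketch of a score-function surrogate term, assuming TF 1.x.
import tensorflow as tf

dist = tf.distributions.Normal(loc=0.0, scale=1.0)
sample = dist.sample()
downstream_loss = tf.square(sample - 2.0)
# Parameter gradients flow only through log_prob; the loss value is frozen.
surrogate = dist.log_prob(sample) * tf.stop_gradient(downstream_loss)
total_loss = tf.add_n([downstream_loss, surrogate])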
Example 4: _check_trt_version_compatibility
def _check_trt_version_compatibility():
  """Check compatibility of TensorRT version.

  Raises:
    RuntimeError: if the TensorRT library version is incompatible.
  """
  compiled_version = get_linked_tensorrt_version()
  loaded_version = get_loaded_tensorrt_version()
  tf_logging.info("Linked TensorRT version: %s" % str(compiled_version))
  tf_logging.info("Loaded TensorRT version: %s" % str(loaded_version))
  version_mismatch = False
  if loaded_version[0] < compiled_version[0]:
    tf_logging.error(
        "TensorRT version mismatch. Tensorflow was compiled against " +
        "TensorRT %s but library loaded from environment is TensorRT %s" %
        (".".join([str(x) for x in compiled_version]),
         ".".join([str(x) for x in loaded_version])) +
        ". Please make sure that correct version of TensorRT " +
        "is available in the system and added to ldconfig or LD_LIBRARY_PATH")
    raise RuntimeError("Incompatible TensorRT library version")

  for i in zip(loaded_version, compiled_version):
    if i[0] != i[1]:
      tf_logging.warn("TensorRT mismatch. Compiled against version " +
                      "%s, but loaded %s. Things may not work" %
                      (".".join([str(x) for x in compiled_version]),
                       ".".join([str(x) for x in loaded_version])))
      version_mismatch = True
      break
  if not version_mismatch:
    tf_logging.info("Running against TensorRT version %s" %
                    ".".join([str(x) for x in loaded_version]))
Example 5: __init__
def __init__(self, num_units, forget_bias=1.0,
             state_is_tuple=True, activation=None, reuse=None):
  """Initialize the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
      Must set to `0.0` manually when restoring from CudnnLSTM-trained
      checkpoints.
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`. If False, they are concatenated
      along the column axis. The latter behavior will soon be deprecated.
    activation: Activation function of the inner states. Default: `tanh`.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
  """
  super(BasicLSTMCell, self).__init__(_reuse=reuse)
  if not state_is_tuple:
    logging.warn("%s: Using a concatenated state is slower and will soon be "
                 "deprecated. Use state_is_tuple=True.", self)
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation or math_ops.tanh
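A hedged usage sketch for this constructor; this is the contrib-era TF 1.x API, and the exact module path (tf.nn.rnn_cell vs. tf.contrib.rnn) varied across releases.

# A hedged usage sketch, assuming TF 1.x.
import tensorflow as tf

cell = tf.nn.rnn_cell.BasicLSTMCell(num_units=128, forget_bias=1.0,
                                    state_is_tuple=True)
state = cell.zero_state(batch_size=32, dtype=tf.float32)
# Passing state_is_tuple=False would trigger the logging.warn shown above.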
Example 6: _testZeroDensity
def _testZeroDensity(self, alpha):
  """Zero isn't in the support of the gamma distribution.

  But quantized floating point math has its limits.
  TODO(bjp): Implement log-gamma sampler for small-shape distributions.

  Args:
    alpha: float shape value to test
  """
  try:
    from scipy import stats  # pylint: disable=g-import-not-at-top
  except ImportError as e:
    tf_logging.warn("Cannot test zero density proportions: %s" % e)
    return
  allowable_zeros = {
      dtypes.float16: stats.gamma(alpha).cdf(np.finfo(np.float16).tiny),
      dtypes.float32: stats.gamma(alpha).cdf(np.finfo(np.float32).tiny),
      dtypes.float64: stats.gamma(alpha).cdf(np.finfo(np.float64).tiny)
  }
  failures = []
  for use_gpu in [False, True]:
    for dt in dtypes.float16, dtypes.float32, dtypes.float64:
      sampler = self._Sampler(
          10000, alpha, 1.0, dt, use_gpu=use_gpu, seed=12345)
      x = sampler()
      allowable = allowable_zeros[dt] * x.size
      allowable = allowable * 2 if allowable < 10 else allowable * 1.05
      if np.sum(x <= 0) > allowable:
        failures += [(use_gpu, dt)]
  self.assertEqual([], failures)
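The zero-count budget is just the CDF mass below the smallest positive float, scaled by the sample size and padded. A standalone sketch, assuming SciPy:

# A standalone sketch of the zero-density budget, assuming SciPy.
import numpy as np
from scipy import stats

alpha, sample_size = 0.1, 10000
p_zero = stats.gamma(alpha).cdf(np.finfo(np.float32).tiny)
allowable = p_zero * sample_size
allowable = allowable * 2 if allowable < 10 else allowable * 1.05
print(allowable)  # maximum number of non-positive samples tolerated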
Example 7: validateKolmogorovSmirnov
def validateKolmogorovSmirnov(self,
                              shape,
                              mean,
                              stddev,
                              minval,
                              maxval,
                              seed=1618):
  try:
    import scipy.stats  # pylint: disable=g-import-not-at-top
    random_seed.set_random_seed(seed)
    with self.test_session(use_gpu=True):
      samples = random_ops.parameterized_truncated_normal(
          shape, mean, stddev, minval, maxval).eval()
    assert (~np.isnan(samples)).all()
    minval = max(mean - stddev * 10, minval)
    maxval = min(mean + stddev * 10, maxval)
    dist = scipy.stats.norm(loc=mean, scale=stddev)
    cdf_min = dist.cdf(minval)
    cdf_max = dist.cdf(maxval)

    def truncated_cdf(x):
      return np.clip((dist.cdf(x) - cdf_min) / (cdf_max - cdf_min), 0.0, 1.0)

    pvalue = scipy.stats.kstest(samples, truncated_cdf)[1]
    self.assertGreater(pvalue, 1e-10)
  except ImportError as e:
    tf_logging.warn("Cannot test truncated normal op: %s" % str(e))
Example 8: _get_timestamped_export_dir
def _get_timestamped_export_dir(export_dir_base):
  # When we create a timestamped directory, there is a small chance that the
  # directory already exists because another worker is also writing exports.
  # In this case we just wait one second to get a new timestamp and try again.
  # If this fails several times in a row, then something is seriously wrong.
  max_directory_creation_attempts = 10

  attempts = 0
  while attempts < max_directory_creation_attempts:
    export_timestamp = int(time.time())
    export_dir = os.path.join(
        compat.as_bytes(export_dir_base),
        compat.as_bytes(str(export_timestamp)))
    if not gfile.Exists(export_dir):
      # Collisions are still possible (though extremely unlikely): this
      # directory is not actually created yet, but it will be almost
      # instantly on return from this function.
      return export_dir
    time.sleep(1)
    attempts += 1
    logging.warn(
        "Export directory {} already exists; retrying (attempt {}/{})".format(
            export_dir, attempts, max_directory_creation_attempts))
  raise RuntimeError("Failed to obtain a unique export directory name after "
                     "{} attempts.".format(max_directory_creation_attempts))
Example 9: __init__
def __init__(self, num_units, forget_bias=1.0, input_size=None,
             state_is_tuple=True, activation=tanh, reuse=None):
  """Initialize the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
    input_size: Deprecated and unused.
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`. If False, they are concatenated
      along the column axis. The latter behavior will soon be deprecated.
    activation: Activation function of the inner states.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
  """
  if not state_is_tuple:
    logging.warn("%s: Using a concatenated state is slower and will soon be "
                 "deprecated. Use state_is_tuple=True.", self)
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)
  self._num_units = num_units
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation
  self._reuse = reuse
Example 10: _testCompareToExplicitDerivative
def _testCompareToExplicitDerivative(self, dtype):
  """Compare to the explicit reparameterization derivative.

  Verifies that the computed derivative satisfies
  dsample / dalpha = d igammainv(alpha, u) / dalpha,
  where u = igamma(alpha, sample).

  Args:
    dtype: TensorFlow dtype to perform the computations in.
  """
  delta = 1e-3
  np_dtype = dtype.as_numpy_dtype
  try:
    from scipy import misc  # pylint: disable=g-import-not-at-top
    from scipy import special  # pylint: disable=g-import-not-at-top

    alpha_val = np.logspace(-2, 3, dtype=np_dtype)
    alpha = constant_op.constant(alpha_val)
    sample = random_ops.random_gamma([], alpha, np_dtype(1.0), dtype=dtype)
    actual = gradients_impl.gradients(sample, alpha)[0]

    (sample_val, actual_val) = self.evaluate((sample, actual))

    u = special.gammainc(alpha_val, sample_val)
    expected_val = misc.derivative(
        lambda alpha_prime: special.gammaincinv(alpha_prime, u),
        alpha_val, dx=delta * alpha_val)

    self.assertAllClose(actual_val, expected_val, rtol=1e-3, atol=1e-3)
  except ImportError as e:
    tf_logging.warn("Cannot use special functions in a test: %s" % str(e))
Example 11: __init__
def __init__(self, num_units, recurrence_depth=1, transfer_bias=-2.0,
             input_size=None, state_is_tuple=False, activation=tanh):
  """Initialize the basic RHN cell.

  Args:
    num_units: int, The number of units per recurrence depth in the RHN cell.
    recurrence_depth: int, Number of recurrent layers in the RHN network.
    transfer_bias: float, The bias added to the transfer (T) gates.
    input_size: Deprecated and unused.
    state_is_tuple: If True, accepted and returned states are a 1-tuple of
      the `m_state`. Defaults to False; this default behavior will soon be
      deprecated.
    activation: Activation function of the inner states.
  """
  if not state_is_tuple:
    logging.warn(
        "%s: Using a concatenated state is slower and will soon be "
        "deprecated. Use state_is_tuple=True.", self)
  if input_size is not None:
    logging.warn("%s: The input_size parameter is deprecated.", self)
  self._num_units = num_units
  self._recurrence_depth = recurrence_depth
  self._transfer_bias = transfer_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation
Example 12: _changing_default_center_bias
def _changing_default_center_bias():
  logging.warn(
      "Change warning: default value of `enable_centered_bias` will change "
      "after 2016-10-09. It will be disabled by default. "
      "Instructions for keeping existing behaviour:\n"
      "Explicitly set `enable_centered_bias` to 'True' if you want to keep "
      "existing behaviour.")
Example 13: __init__
def __init__(self, num_units, forget_bias=1.0,
             state_is_tuple=True, activation=None, reuse=None, name=None):
  """Initialize the basic LSTM cell.

  Args:
    num_units: int, The number of units in the LSTM cell.
    forget_bias: float, The bias added to forget gates (see above).
      Must set to `0.0` manually when restoring from CudnnLSTM-trained
      checkpoints.
    state_is_tuple: If True, accepted and returned states are 2-tuples of
      the `c_state` and `m_state`. If False, they are concatenated
      along the column axis. The latter behavior will soon be deprecated.
    activation: Activation function of the inner states. Default: `tanh`.
    reuse: (optional) Python boolean describing whether to reuse variables
      in an existing scope. If not `True`, and the existing scope already has
      the given variables, an error is raised.
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such
      cases.

  When restoring from CudnnLSTM-trained checkpoints, must use
  `CudnnCompatibleLSTMCell` instead.
  """
  super(BasicLSTMCell, self).__init__(_reuse=reuse, name=name)
  if not state_is_tuple:
    logging.warn("%s: Using a concatenated state is slower and will soon be "
                 "deprecated. Use state_is_tuple=True.", self)

  # Inputs must be 2-dimensional.
  self.input_spec = base_layer.InputSpec(ndim=2)

  self._num_units = num_units
  self._forget_bias = forget_bias
  self._state_is_tuple = state_is_tuple
  self._activation = activation or math_ops.tanh
Example 14: _write_dict_to_summary
def _write_dict_to_summary(output_dir,
                           dictionary,
                           current_global_step):
  """Writes a `dict` into summary file in given output directory.

  Args:
    output_dir: `str`, directory to write the summary file in.
    dictionary: the `dict` to be written to summary file.
    current_global_step: `int`, the current global step.
  """
  logging.info('Saving dict for global step %d: %s', current_global_step,
               _dict_to_str(dictionary))
  summary_writer = writer_cache.FileWriterCache.get(output_dir)
  summary_proto = summary_pb2.Summary()
  for key in dictionary:
    if dictionary[key] is None:
      continue
    if key == 'global_step':
      continue
    value = summary_proto.value.add()
    value.tag = key
    if (isinstance(dictionary[key], np.float32) or
        isinstance(dictionary[key], float)):
      value.simple_value = float(dictionary[key])
    elif (isinstance(dictionary[key], np.int64) or
          isinstance(dictionary[key], np.int32) or
          isinstance(dictionary[key], int)):
      value.simple_value = int(dictionary[key])
    else:
      logging.warn(
          'Skipping summary for %s, must be a float, np.float32, np.int64, '
          'np.int32 or int.',
          key)
  summary_writer.add_summary(summary_proto, current_global_step)
  summary_writer.flush()
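A hedged usage sketch for the helper above; the output directory and metric keys are illustrative. Unsupported value types (here a string) take the logging.warn branch and are skipped.

# A hedged usage sketch; the directory and dict contents are illustrative.
import numpy as np

metrics = {"loss": 0.25, "accuracy": np.float32(0.9),
           "global_step": 100, "name": "run-1"}
_write_dict_to_summary("/tmp/summaries", metrics, current_global_step=100)
# "name" is skipped with a warning; "global_step" is filtered explicitly.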
Example 15: _garbage_collect_exports
def _garbage_collect_exports(self, export_dir_base):
  """Deletes older exports, retaining only a given number of the most recent.

  Export subdirectories are assumed to be named with monotonically increasing
  integers; the most recent are taken to be those with the largest values.

  Args:
    export_dir_base: the base directory under which each export is in a
      versioned subdirectory.
  """
  if self._exports_to_keep is None:
    return

  def _export_version_parser(path):
    # Create a simple parser that pulls the export_version from the directory.
    filename = os.path.basename(path.path)
    if not (len(filename) == 10 and filename.isdigit()):
      return None
    return path._replace(export_version=int(filename))

  # pylint: disable=protected-access
  keep_filter = gc._largest_export_versions(self._exports_to_keep)
  delete_filter = gc._negation(keep_filter)
  for p in delete_filter(
      gc._get_paths(export_dir_base, parser=_export_version_parser)):
    try:
      gfile.DeleteRecursively(p.path)
    except errors_impl.NotFoundError as e:
      tf_logging.warn('Can not delete %s recursively: %s', p.path, e)
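The version parser only accepts directory names that look like 10-digit Unix timestamps; anything else is ignored by garbage collection. A standalone sketch of that filter:

# A standalone sketch of the 10-digit export-version filter used above.
for name in ("1700000000", "best_model", "123", "01234567890"):
  is_export_version = len(name) == 10 and name.isdigit()
  print(name, is_export_version)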