This article collects typical usage examples of the info function from Python's tensorflow.python.platform.logging module: what info does, how to call it, and how it is used in real code. If those are the questions you are trying to answer, the curated examples below should help.
The following shows 15 code examples of the info function, ordered by popularity by default.
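Before the examples, a minimal sketch of the call pattern they all share. This assumes a TF release old enough to still ship tensorflow.python.platform.logging (later releases expose the same calls as tf.logging); info takes a printf-style format string followed by its arguments, mirroring the standard library's logging module:

from tensorflow.python.platform import logging

logging.set_verbosity(logging.INFO)                          # make INFO messages visible
logging.info('Processed %d records in %0.1f secs', 42, 1.5)  # args are formatted lazily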
Example 1: main
def main(unused_argv=None):
  if FLAGS.debug:
    logging.set_verbosity(logging.DEBUG)
  if not FLAGS.logdir:
    logging.error('A logdir must be specified. Run `tensorboard --help` for '
                  'details and examples.')
    return -1
  if FLAGS.debug:
    logging.info('Starting TensorBoard in directory %s', os.getcwd())
  path_to_run = ParseEventFilesFlag(FLAGS.logdir)
  multiplexer = event_multiplexer.AutoloadingMultiplexer(
      path_to_run=path_to_run, interval_secs=60,
      size_guidance=TENSORBOARD_SIZE_GUIDANCE)
  multiplexer.AutoUpdate(interval=30)
  factory = functools.partial(tensorboard_handler.TensorboardHandler,
                              multiplexer)
  try:
    server = ThreadedHTTPServer((FLAGS.host, FLAGS.port), factory)
  except socket.error:
    logging.error('Tried to connect to port %d, but that address is in use.',
                  FLAGS.port)
    return -2
  status_bar.SetupStatusBarInsideGoogle('TensorBoard', FLAGS.port)
  print('Starting TensorBoard on port %d' % FLAGS.port)
  print('(You can navigate to http://localhost:%d)' % FLAGS.port)
  server.serve_forever()
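The FLAGS used by main are assumed to be defined at module level elsewhere in the file. A hedged sketch of what those definitions typically look like with the platform flags wrapper (flag names taken from the code above; the defaults are illustrative, with 6006 being TensorBoard's conventional port):

from tensorflow.python.platform import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('logdir', '', 'Directory containing TensorBoard event files.')
flags.DEFINE_string('host', '0.0.0.0', 'Host for the HTTP server to bind to.')
flags.DEFINE_integer('port', 6006, 'Port for the HTTP server.')
flags.DEFINE_boolean('debug', False, 'Whether to enable debug-level logging.')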
Example 2: l1_regularizer
def l1_regularizer(scale):
  """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.

  Returns:
    A function with signature `l1(weights, name=None)` that applies L1
    regularization.

  Raises:
    ValueError: If scale is outside of the range [0.0, 1.0] or if scale is not
      a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError("scale cannot be an integer: %s" % scale)
  if isinstance(scale, numbers.Real):
    if scale < 0.0:
      raise ValueError("Setting a scale less than 0 on a regularizer: %g" %
                       scale)
    if scale >= 1.0:
      raise ValueError("Setting a scale greater than 1 on a regularizer: %g" %
                       scale)
    if scale == 0.0:
      logging.info("Scale of 0 disables regularizer.")
      return lambda _, name=None: None

  def l1(weights, name=None):
    """Applies L1 regularization to weights."""
    with ops.op_scope([weights], name, "l1_regularizer") as scope:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name="scale")
      return standard_ops.mul(
          my_scale,
          standard_ops.reduce_sum(standard_ops.abs(weights)),
          name=scope)

  return l1
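A short usage sketch for the factory above (variable names are illustrative; assumes the TF 0.x graph-mode API these examples target):

import tensorflow as tf

weights = tf.Variable(tf.zeros([10, 10]), name='weights')
penalty = l1_regularizer(0.01)(weights)  # scalar Tensor: 0.01 * sum(|weights|)
# penalty would then be added to the training loss.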
Example 3: Load
def Load():
  for (path, name) in six.iteritems(path_to_run):
    logging.info('Checking for new runs in %s', path)
    multiplexer.AddRunsFromDirectory(path, name)
  t = threading.Timer(interval_secs, Load)
  t.daemon = True
  t.start()
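Load closes over path_to_run, multiplexer, and interval_secs, and reschedules itself: each scan of the run directories arms a daemon threading.Timer that triggers the next scan. The same self-rescheduling pattern in isolation (standalone sketch, names illustrative):

import threading

def tick(interval_secs=60):
    print('periodic work goes here')                       # e.g. rescan directories
    t = threading.Timer(interval_secs, tick, [interval_secs])
    t.daemon = True                                        # don't block interpreter exit
    t.start()

tick()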
Example 4: ListRecursively
def ListRecursively(top):
  """Walks a directory tree, returning (dir_path, file_paths) tuples.

  For the directory |top| and each of its subdirectories, returns a tuple
  containing the path to the directory and the path to each of the contained
  files. Note that unlike os.walk()/gfile.Walk(), this does not list
  subdirectories, and the file paths are all absolute.

  Args:
    top: A path to a GCS directory.

  Returns:
    A list of (dir_path, file_paths) tuples.
  """
  if top.endswith('/'):
    wildcard = top + '**'
  else:
    wildcard = top + '/**'
  tuples = []
  try:
    file_paths = ListDirectory(wildcard)
  except subprocess.CalledProcessError as e:
    logging.info('%s, assuming it means no files were found', e)
    return []
  for file_path in file_paths:
    dir_path = os.path.dirname(file_path)
    if tuples and tuples[-1][0] == dir_path:
      tuples[-1][1].append(file_path)
    else:
      tuples.append((dir_path, [file_path]))
  return tuples
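A hedged usage sketch (the bucket path is illustrative; assumes the module's companion ListDirectory helper shells out to gsutil, which is what the subprocess.CalledProcessError handling suggests):

for dir_path, file_paths in ListRecursively('gs://my-bucket/logdir'):
    print(dir_path, len(file_paths))   # one tuple per directory that holds files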
Example 5: wait_for_session
def wait_for_session(self, master, config=None):
  """Creates a new `Session` and waits for model to be ready.

  Creates a new `Session` on 'master' and waits for the model to be
  initialized or recovered from a checkpoint. It's expected that another
  thread or process will make the model ready; this method is intended for
  threads/processes that participate in a distributed training configuration
  where a different thread/process is responsible for initializing or
  recovering the model being trained.

  Args:
    master: `String` representation of the TensorFlow master to use.
    config: Optional ConfigProto proto used to configure the session.

  Returns:
    sess: A `Session`.
  """
  target = self._maybe_launch_in_process_server(master)
  sess = session.Session(target, graph=self._graph, config=config)
  if self._local_init_op:
    sess.run([self._local_init_op])
  while True:
    not_ready = self._model_not_ready(sess)
    if not not_ready:
      break
    self._safe_close(sess)
    logging.info("Waiting for model to be ready: %s", not_ready)
    time.sleep(self._recovery_wait_secs)
    sess = session.Session(master, graph=self._graph)
  return sess
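A hedged usage sketch. This method belongs to TF's SessionManager-style helper; the construction below is illustrative and assumes the TF 0.x tf.train.SessionManager API with a readiness op (an assumption, not shown in the source):

sm = tf.train.SessionManager(ready_op=tf.assert_variables_initialized())
sess = sm.wait_for_session('grpc://ps0.example.com:2222')   # blocks until ready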
Example 6: _serve_static_file
def _serve_static_file(self, path):
  """Serves the static file located at the given path.

  Args:
    path: The path of the static file, relative to the tensorboard/ directory.
  """
  # Strip off the leading forward slash.
  path = path.lstrip('/')
  if not self._path_is_safe(path):
    logging.info('path %s not safe, sending 404', path)
    # Traversal attack, so 404.
    self.send_error(404)
    return
  if path.startswith('external'):
    path = os.path.join('../', path)
  else:
    path = os.path.join('tensorboard', path)
  # Open the file and read it.
  try:
    contents = resource_loader.load_resource(path)
  except IOError:
    logging.info('path %s not found, sending 404', path)
    self.send_error(404)
    return
  self.send_response(200)
  mimetype = mimetypes.guess_type(path)[0] or 'application/octet-stream'
  self.send_header('Content-Type', mimetype)
  self.end_headers()
  self.wfile.write(contents)
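The handler delegates traversal protection to _path_is_safe, which isn't shown here. A plausible sketch of such a check (an assumption, not the source's implementation): normalize the candidate path and reject anything that escapes the serving root:

import posixpath

def path_is_safe(path):
    """Hypothetical traversal check: reject paths that escape the root."""
    normalized = posixpath.normpath(path)   # collapses '..' and '.' segments
    return not normalized.startswith(('..', '/'))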
Example 7: l2_regularizer
def l2_regularizer(scale):
  """Returns a function that can be used to apply L2 regularization to weights.

  Small values of L2 can help prevent overfitting the training data.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.

  Returns:
    A function with signature `l2(weights, name=None)` that applies L2
    regularization.

  Raises:
    ValueError: If scale is outside of the range [0.0, 1.0] or if scale is not
      a float.
  """
  if isinstance(scale, numbers.Integral):
    raise ValueError("scale cannot be an integer: %s" % (scale,))
  if isinstance(scale, numbers.Real):
    if scale < 0.0:
      raise ValueError("Setting a scale less than 0 on a regularizer: %g." %
                       scale)
    if scale >= 1.0:
      raise ValueError("Setting a scale greater than 1 on a regularizer: %g." %
                       scale)
    if scale == 0.0:
      logging.info("Scale of 0 disables regularizer.")
      return lambda _, name=None: None

  def l2(weights, name=None):
    """Applies l2 regularization to weights."""
    with ops.op_scope([weights], name, "l2_regularizer") as scope:
      my_scale = ops.convert_to_tensor(scale,
                                       dtype=weights.dtype.base_dtype,
                                       name="scale")
      return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)

  return l2
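The only substantive difference from l1_regularizer in Example 2 is the penalty term: nn.l2_loss(w) computes sum(w**2) / 2, so the returned value is scale * sum(w**2) / 2 rather than scale * sum(|w|). Reusing the weights variable from the sketch after Example 2:

penalty = l2_regularizer(1e-4)(weights)   # == 1e-4 * nn.l2_loss(weights)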
Example 8: predicate
def predicate(e):
  err_str = e.message
  op = e.op
  while op is not None:
    err_str += "\nCaused by: " + op.name
    op = op._original_op
  logging.info("Searching within error strings: '%s' within '%s'",
               expected_err_re_or_predicate, err_str)
  return re.search(expected_err_re_or_predicate, err_str)
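This closure comes from TF's test utilities: it walks the chain of _original_op links so that an error raised by a rewritten graph node can still be matched against the op that originally produced it. A sketch exercising the predicate with stand-in objects (the Fake* classes are hypothetical stand-ins for an OpError's .message and .op attributes, and the sketch assumes predicate, along with its re and logging imports, sits at module scope so it can read expected_err_re_or_predicate):

class FakeOp(object):
    def __init__(self, name, original=None):
        self.name = name
        self._original_op = original

class FakeError(object):
    def __init__(self, message, op):
        self.message = message
        self.op = op

expected_err_re_or_predicate = 'Caused by: add_1'
err = FakeError('shape mismatch', FakeOp('add_1/rewritten', FakeOp('add_1')))
print(bool(predicate(err)))   # True: the original op's name appears in the chain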
Example 9: testCoNLLFormat
def testCoNLLFormat(self):
  self.WriteContext('conll-sentence')
  logging.info('Writing conll file to: %s', self.corpus_file)
  with open(self.corpus_file, 'w') as f:
    f.write((CONLL_DOC1 + u'\n\n' + CONLL_DOC2 + u'\n')
            .replace(' ', '\t').encode('utf-8'))
  self.ValidateDocuments()
  self.BuildLexicon()
  self.ValidateTagToCategoryMap()
Example 10: testParseUntilNotAlive
def testParseUntilNotAlive(self):
  """Ensures that the 'alive' condition works in the Cond ops."""
  with self.test_session(graph=tf.Graph()) as sess:
    t = self.MakeGraph(batch_size=3, beam_size=2, max_steps=5).training
    sess.run(t['inits'])
    for i in range(5):
      logging.info('run %d', i)
      tf_alive = t['alive'].eval()
      self.assertFalse(any(tf_alive))
Example 11: WriteContext
def WriteContext(self, corpus_format):
  context = task_spec_pb2.TaskSpec()
  self.AddInput('documents', self.corpus_file, corpus_format, context)
  for name in ('word-map', 'lcword-map', 'tag-map',
               'category-map', 'label-map', 'prefix-table',
               'suffix-table', 'tag-to-category'):
    self.AddInput(name, os.path.join(FLAGS.test_tmpdir, name), '', context)
  logging.info('Writing context to: %s', self.context_file)
  with open(self.context_file, 'w') as f:
    f.write(str(context))
Example 12: _Load
def _Load():
  start = time.time()
  for (path, name) in six.iteritems(path_to_run):
    multiplexer.AddRunsFromDirectory(path, name)
  multiplexer.Reload()
  duration = time.time() - start
  logging.info('Multiplexer done loading. Load took %0.1f secs', duration)
  t = threading.Timer(LOAD_INTERVAL, _Load)
  t.daemon = True
  t.start()
Example 13: convert_variables_to_constants
def convert_variables_to_constants(sess, input_graph_def, output_node_names):
  """Replaces all the variables in a graph with constants of the same values.

  If you have a trained graph containing Variable ops, it can be convenient to
  convert them all to Const ops holding the same values. This makes it possible
  to describe the network fully with a single GraphDef file, and allows the
  removal of a lot of ops related to loading and saving the variables.

  Args:
    sess: Active TensorFlow session containing the variables.
    input_graph_def: GraphDef object holding the network.
    output_node_names: List of name strings for the result nodes of the graph.

  Returns:
    GraphDef containing a simplified version of the original.
  """
  found_variables = {}
  variable_names = []
  variable_dict_names = []
  for node in input_graph_def.node:
    if node.op == "Assign":
      variable_name = node.input[0]
      variable_dict_names.append(variable_name)
      variable_names.append(variable_name + ":0")
  if variable_names:
    returned_variables = sess.run(variable_names)
  else:
    returned_variables = []
  found_variables = dict(zip(variable_dict_names, returned_variables))
  logging.info("Frozen %d variables." % len(returned_variables))

  # This graph only includes the nodes needed to evaluate the output nodes, and
  # removes unneeded nodes like those involved in saving and assignment.
  inference_graph = extract_sub_graph(input_graph_def, output_node_names)

  output_graph_def = graph_pb2.GraphDef()
  how_many_converted = 0
  for input_node in inference_graph.node:
    output_node = graph_pb2.NodeDef()
    if input_node.name in found_variables:
      output_node.op = "Const"
      output_node.name = input_node.name
      dtype = input_node.attr["dtype"]
      data = found_variables[input_node.name]
      output_node.attr["dtype"].CopyFrom(dtype)
      output_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
          tensor=tensor_util.make_tensor_proto(data,
                                               dtype=dtype.type,
                                               shape=data.shape)))
      how_many_converted += 1
    else:
      output_node.CopyFrom(input_node)
    output_graph_def.node.extend([output_node])
  print("Converted %d variables to const ops." % how_many_converted)
  return output_graph_def
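A hedged usage sketch for freezing a trained graph (the path and node name are illustrative):

frozen = convert_variables_to_constants(sess, sess.graph_def, ['softmax'])
with open('/tmp/frozen_graph.pb', 'wb') as f:
    f.write(frozen.SerializeToString())   # one self-contained GraphDef on disk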
Example 14: CheckTokenization
def CheckTokenization(self, sentence, tokenization):
  self.WriteContext('english-text')
  logging.info('Writing text file to: %s', self.corpus_file)
  with open(self.corpus_file, 'w') as f:
    f.write(sentence)
  sentence, _ = gen_parser_ops.document_source(
      self.context_file, batch_size=1)
  with self.test_session() as sess:
    sentence_doc = self.ReadNextDocument(sess, sentence)
    self.assertEqual(' '.join([t.word for t in sentence_doc.token]),
                     tokenization)
Example 15: AddRunsFromDirectory
def AddRunsFromDirectory(self, path, name=None):
  """Load runs from a directory; recursively walks subdirectories.

  If path doesn't exist, this is a no-op. This ensures that it is safe to call
  `AddRunsFromDirectory` multiple times, even before the directory is made.

  If path is a directory, load event files in the directory (if any exist) and
  recursively call AddRunsFromDirectory on any subdirectories. This means you
  can call AddRunsFromDirectory at the root of a tree of event logs and
  TensorBoard will load them all.

  If the `EventMultiplexer` is already loaded this will cause
  the newly created accumulators to `Reload()`.

  Args:
    path: A string path to a directory to load runs from.
    name: Optionally, what name to apply to the runs. If name is provided
      and the directory contains run subdirectories, the name of each subrun
      is the concatenation of the parent name and the subdirectory name. If
      name is provided and the directory contains event files, then a run
      named "name" is added, containing the events from the path.

  Raises:
    ValueError: If the path exists and isn't a directory.

  Returns:
    The `EventMultiplexer`.
  """
  subdirs = []
  if gcs.IsGCSPath(path):
    subdirs = [
        subdir
        for (subdir, files) in gcs.ListRecursively(path)
        if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
    ]
  else:
    if not gfile.Exists(path):
      return  # Maybe it hasn't been created yet, fail silently to retry later
    if not gfile.IsDirectory(path):
      raise ValueError('AddRunsFromDirectory: path exists and is not a '
                       'directory, %s' % path)
    subdirs = [
        subdir
        for (subdir, _, files) in gfile.Walk(path)
        if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
    ]

  for subdir in subdirs:
    logging.info('Adding events from directory %s', subdir)
    rpath = os.path.relpath(subdir, path)
    subname = os.path.join(name, rpath) if name else rpath
    self.AddRun(subdir, name=subname)

  return self
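A hedged usage sketch (the log directory is illustrative; EventMultiplexer is the class named in the docstring above):

multiplexer = event_multiplexer.EventMultiplexer()
multiplexer.AddRunsFromDirectory('/tmp/logs', name='experiment')  # no-op if missing
multiplexer.Reload()   # parse the event files discovered above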