This article collects typical usage examples of the read_file_to_string function from tensorflow.python.lib.io.file_io in Python. If you are wondering what read_file_to_string does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following 15 code examples of read_file_to_string are shown, sorted by popularity by default.
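Before the examples, here is a minimal round-trip sketch (the path is hypothetical; a gs:// URI works the same way). Depending on the TensorFlow version, read_file_to_string may return str or bytes, which is why many of the examples below call .decode().

from tensorflow.python.lib.io import file_io

path = '/tmp/example.txt'  # hypothetical local path; gs:// paths are also supported
file_io.write_string_to_file(path, 'hello world')

contents = file_io.read_file_to_string(path)
# Older TensorFlow releases return bytes here, newer ones return str.
if isinstance(contents, bytes):
  contents = contents.decode('utf-8')
print(contents)  # -> hello world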
Example 1: local_analysis
def local_analysis(args):
  if args.analysis:
    # Already analyzed.
    return

  if not args.schema or not args.features:
    raise ValueError('Either --analysis, or both --schema and --features are provided.')

  tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
  cluster_spec = tf_config.get('cluster', {})
  if len(cluster_spec.get('worker', [])) > 0:
    raise ValueError('If "schema" and "features" are provided, local analysis will run and ' +
                     'only BASIC scale-tier (no workers node) is supported.')

  if cluster_spec and not (args.schema.startswith('gs://') and args.features.startswith('gs://')):
    raise ValueError('Cloud trainer requires GCS paths for --schema and --features.')

  print('Running analysis.')
  schema = json.loads(file_io.read_file_to_string(args.schema).decode())
  features = json.loads(file_io.read_file_to_string(args.features).decode())

  args.analysis = os.path.join(args.job_dir, 'analysis')
  args.transform = True

  file_io.recursive_create_dir(args.analysis)
  feature_analysis.run_local_analysis(args.analysis, args.train, schema, features)
  print('Analysis done.')
Example 2: main
def main(argv=None):
  args = parse_arguments(sys.argv if argv is None else argv)

  if args.schema:
    schema = json.loads(
        file_io.read_file_to_string(args.schema).decode())
  else:
    import google.datalab.bigquery as bq
    schema = bq.Table(args.bigquery).schema._bq_schema
  features = json.loads(
      file_io.read_file_to_string(args.features).decode())

  file_io.recursive_create_dir(args.output)

  if args.cloud:
    run_cloud_analysis(
        output_dir=args.output,
        csv_file_pattern=args.csv,
        bigquery_table=args.bigquery,
        schema=schema,
        features=features)
  else:
    feature_analysis.run_local_analysis(
        output_dir=args.output,
        csv_file_pattern=args.csv,
        schema=schema,
        features=features)
Example 3: test_categorical
def test_categorical(self):
  output_folder = tempfile.mkdtemp()
  input_file_path = tempfile.mkstemp(dir=output_folder)[1]
  try:
    csv_file = ['red,apple', 'red,pepper', 'red,apple', 'blue,grape',
                'blue,apple', 'green,pepper']
    file_io.write_string_to_file(
        input_file_path,
        '\n'.join(csv_file))

    schema = [{'name': 'color', 'type': 'STRING'},
              {'name': 'type', 'type': 'STRING'}]
    features = {'color': {'transform': 'one_hot', 'source_column': 'color'},
                'type': {'transform': 'target'}}
    feature_analysis.run_local_analysis(
        output_folder, [input_file_path], schema, features)

    stats = json.loads(
        file_io.read_file_to_string(
            os.path.join(output_folder, analyze.constant.STATS_FILE)).decode())
    self.assertEqual(stats['column_stats']['color']['vocab_size'], 3)

    # Color column.
    vocab_str = file_io.read_file_to_string(
        os.path.join(output_folder, analyze.constant.VOCAB_ANALYSIS_FILE % 'color'))
    vocab = pd.read_csv(six.StringIO(vocab_str),
                        header=None,
                        names=['color', 'count'])
    expected_vocab = pd.DataFrame(
        {'color': ['red', 'blue', 'green'], 'count': [3, 2, 1]},
        columns=['color', 'count'])
    pd.util.testing.assert_frame_equal(vocab, expected_vocab)
  finally:
    shutil.rmtree(output_folder)
Example 4: test_text
def test_text(self):
  output_folder = tempfile.mkdtemp()
  input_file_path = tempfile.mkstemp(dir=output_folder)[1]
  try:
    csv_file = ['the quick brown fox,raining in kir,cat1|cat2,true',
                'quick brown brown chicken,raining in pdx,cat2|cat3|cat4,false']
    file_io.write_string_to_file(
        input_file_path,
        '\n'.join(csv_file))

    schema = [{'name': 'col1', 'type': 'STRING'},
              {'name': 'col2', 'type': 'STRING'},
              {'name': 'col3', 'type': 'STRING'},
              {'name': 'col4', 'type': 'STRING'}]
    features = {'col1': {'transform': 'bag_of_words', 'source_column': 'col1'},
                'col2': {'transform': 'tfidf', 'source_column': 'col2'},
                'col3': {'transform': 'multi_hot', 'source_column': 'col3', 'separator': '|'},
                'col4': {'transform': 'target'}}
    feature_analysis.run_local_analysis(
        output_folder, [input_file_path], schema, features)

    stats = json.loads(
        file_io.read_file_to_string(
            os.path.join(output_folder, analyze.constant.STATS_FILE)).decode())
    self.assertEqual(stats['column_stats']['col1']['vocab_size'], 5)
    self.assertEqual(stats['column_stats']['col2']['vocab_size'], 4)
    self.assertEqual(stats['column_stats']['col3']['vocab_size'], 4)

    vocab_str = file_io.read_file_to_string(
        os.path.join(output_folder,
                     analyze.constant.VOCAB_ANALYSIS_FILE % 'col1'))
    vocab = pd.read_csv(six.StringIO(vocab_str),
                        header=None,
                        names=['col1', 'count'])

    # vocabs are sorted by count only
    col1_vocab = vocab['col1'].tolist()
    self.assertItemsEqual(col1_vocab[:2], ['brown', 'quick'])
    self.assertItemsEqual(col1_vocab[2:], ['chicken', 'fox', 'the'])
    self.assertEqual(vocab['count'].tolist(), [2, 2, 1, 1, 1])

    vocab_str = file_io.read_file_to_string(
        os.path.join(output_folder,
                     analyze.constant.VOCAB_ANALYSIS_FILE % 'col2'))
    vocab = pd.read_csv(six.StringIO(vocab_str),
                        header=None,
                        names=['col2', 'count'])

    # vocabs are sorted by count only
    col2_vocab = vocab['col2'].tolist()
    self.assertItemsEqual(col2_vocab[:2], ['in', 'raining'])
    self.assertItemsEqual(col2_vocab[2:], ['kir', 'pdx'])
    self.assertEqual(vocab['count'].tolist(), [2, 2, 1, 1])
  finally:
    shutil.rmtree(output_folder)
Example 5: testAtomicWriteStringToFileOverwriteFalse
def testAtomicWriteStringToFileOverwriteFalse(self):
  file_path = os.path.join(self._base_dir, "temp_file")
  file_io.atomic_write_string_to_file(file_path, "old", overwrite=False)
  with self.assertRaises(errors.AlreadyExistsError):
    file_io.atomic_write_string_to_file(file_path, "new", overwrite=False)
  file_contents = file_io.read_file_to_string(file_path)
  self.assertEqual("old", file_contents)
  file_io.delete_file(file_path)
  file_io.atomic_write_string_to_file(file_path, "new", overwrite=False)
  file_contents = file_io.read_file_to_string(file_path)
  self.assertEqual("new", file_contents)
Example 6: __init__
def __init__(self, *args, **kwargs):
  super(ApiCompatibilityTest, self).__init__(*args, **kwargs)

  golden_update_warning_filename = os.path.join(
      resource_loader.get_root_dir_with_all_resources(), _UPDATE_WARNING_FILE)
  self._update_golden_warning = file_io.read_file_to_string(
      golden_update_warning_filename)

  test_readme_filename = os.path.join(
      resource_loader.get_root_dir_with_all_resources(), _TEST_README_FILE)
  self._test_readme_message = file_io.read_file_to_string(
      test_readme_filename)
Example 7: get_model_schema_and_features
def get_model_schema_and_features(model_dir):
  """Get a local model's schema and features config.

  Args:
    model_dir: local or GCS path of a model.

  Returns:
    A tuple of schema (list) and features config (dict).
  """
  schema_file = os.path.join(model_dir, 'assets.extra', 'schema.json')
  schema = json.loads(file_io.read_file_to_string(schema_file))
  features_file = os.path.join(model_dir, 'assets.extra', 'features.json')
  features_config = json.loads(file_io.read_file_to_string(features_file))
  return schema, features_config
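A minimal usage sketch for the helper above, assuming a hypothetical model directory that contains assets.extra/schema.json and assets.extra/features.json:

# Hypothetical model directory; it must contain assets.extra/schema.json and
# assets.extra/features.json as written by the training pipeline.
schema, features = get_model_schema_and_features('gs://my-bucket/my-model')
print([col['name'] for col in schema])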
Example 8: testCopy
def testCopy(self):
  file_path = os.path.join(self._base_dir, "temp_file")
  file_io.FileIO(file_path, mode="w").write("testing")
  copy_path = os.path.join(self._base_dir, "copy_file")
  file_io.copy(file_path, copy_path)
  self.assertTrue(file_io.file_exists(copy_path))
  self.assertEqual(b"testing", file_io.read_file_to_string(file_path))
Example 9: test_numerics
def test_numerics(self):
  output_folder = tempfile.mkdtemp()
  input_file_path = tempfile.mkstemp(dir=output_folder)[1]
  try:
    file_io.write_string_to_file(
        input_file_path,
        '\n'.join(['%s,%s,%s' % (i, 10 * i + 0.5, i + 0.5) for i in range(100)]))

    schema = [{'name': 'col1', 'type': 'INTEGER'},
              {'name': 'col2', 'type': 'FLOAT'},
              {'name': 'col3', 'type': 'FLOAT'}]
    features = {'col1': {'transform': 'scale', 'source_column': 'col1'},
                'col2': {'transform': 'identity', 'source_column': 'col2'},
                'col3': {'transform': 'target'}}
    feature_analysis.run_local_analysis(
        output_folder, [input_file_path], schema, features)

    stats = json.loads(
        file_io.read_file_to_string(
            os.path.join(output_folder, analyze.constant.STATS_FILE)).decode())

    self.assertEqual(stats['num_examples'], 100)
    col = stats['column_stats']['col1']
    self.assertAlmostEqual(col['max'], 99.0)
    self.assertAlmostEqual(col['min'], 0.0)
    self.assertAlmostEqual(col['mean'], 49.5)

    col = stats['column_stats']['col2']
    self.assertAlmostEqual(col['max'], 990.5)
    self.assertAlmostEqual(col['min'], 0.5)
    self.assertAlmostEqual(col['mean'], 495.5)
  finally:
    shutil.rmtree(output_folder)
Example 10: load_model
def load_model(saved_model_path):
  """Load a keras.Model from SavedModel.

  load_model reinstantiates model state by:
  1) loading model topology from json (this will eventually come
     from metagraph).
  2) loading model weights from checkpoint.

  Args:
    saved_model_path: a string specifying the path to an existing SavedModel.

  Returns:
    a keras.Model instance.
  """
  # restore model topology from json string
  model_json_filepath = os.path.join(
      compat.as_bytes(saved_model_path),
      compat.as_bytes(constants.ASSETS_DIRECTORY),
      compat.as_bytes(constants.SAVED_MODEL_FILENAME_JSON))
  model_json = file_io.read_file_to_string(model_json_filepath)
  model = model_from_json(model_json)

  # restore model weights
  checkpoint_prefix = os.path.join(
      compat.as_text(saved_model_path),
      compat.as_text(constants.VARIABLES_DIRECTORY),
      compat.as_text(constants.VARIABLES_FILENAME))
  model.load_weights(checkpoint_prefix)
  return model
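A minimal usage sketch, assuming a hypothetical SavedModel directory that was exported by the matching Keras SavedModel helper, so the assets JSON and the variables checkpoint both exist:

# Hypothetical path to a SavedModel exported by the corresponding save helper.
model = load_model('/tmp/exported_keras_model')
model.summary()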
Example 11: _read_file
def _read_file(filename):
  """Reads a file containing `GraphDef` and returns the protocol buffer.

  Args:
    filename: `graph_def` filename including the path.

  Returns:
    A `GraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  graph_def = graph_pb2.GraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # First try to read it as a binary file.
  file_content = file_io.read_file_to_string(filename)
  try:
    graph_def.ParseFromString(file_content)
    return graph_def
  except Exception:  # pylint: disable=broad-except
    pass
  # Next try to read it as a text file.
  try:
    text_format.Merge(file_content.decode("utf-8"), graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))
  return graph_def
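A minimal usage sketch, with a hypothetical path; because the function first tries binary parsing and then falls back to text format, the same call handles both .pb and .pbtxt graphs:

# Hypothetical path to a serialized GraphDef (binary .pb or text .pbtxt).
graph_def = _read_file('/tmp/frozen_graph.pb')
print('Graph has %d nodes.' % len(graph_def.node))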
Example 12: testMultipleWrites
def testMultipleWrites(self):
  file_path = os.path.join(self._base_dir, "temp_file")
  with file_io.FileIO(file_path, mode="w") as f:
    f.write("line1\n")
    f.write("line2")
  file_contents = file_io.read_file_to_string(file_path)
  self.assertEqual("line1\nline2", file_contents)
Example 13: run_analysis
def run_analysis(args):
  """Builds an analysis file for training.

  Uses BigQuery tables to do the analysis.

  Args:
    args: command line args

  Raises:
    ValueError if schema contains unknown types.
  """
  import google.datalab.bigquery as bq
  if args.bigquery_table:
    table = bq.Table(args.bigquery_table)
    schema_list = table.schema._bq_schema
  else:
    schema_list = json.loads(
        file_io.read_file_to_string(args.schema_file).decode())
    table = bq.ExternalDataSource(
        source=args.input_file_pattern,
        schema=bq.Schema(schema_list))

  # Check the schema is supported.
  for col_schema in schema_list:
    col_type = col_schema['type'].lower()
    if col_type != 'string' and col_type != 'integer' and col_type != 'float':
      raise ValueError('Schema contains an unsupported type %s.' % col_type)

  run_numerical_analysis(table, schema_list, args)
  run_categorical_analysis(table, schema_list, args)

  # Save a copy of the schema to the output location.
  file_io.write_string_to_file(
      os.path.join(args.output_dir, SCHEMA_FILE),
      json.dumps(schema_list, indent=2, separators=(',', ': ')))
Example 14: testFileWrite
def testFileWrite(self):
  file_path = os.path.join(self.get_temp_dir(), "temp_file")
  file_io.write_string_to_file(file_path, "testing")
  self.assertTrue(file_io.file_exists(file_path))
  file_contents = file_io.read_file_to_string(file_path)
  self.assertEqual(b"testing", file_contents)
  file_io.delete_file(file_path)
Example 15: _GetBaseApiMap
def _GetBaseApiMap(self):
  """Get a map from graph op name to its base ApiDef.

  Returns:
    Dictionary mapping graph op name to corresponding ApiDef.
  """
  # Convert base ApiDef in Multiline format to Proto format.
  converted_base_api_dir = os.path.join(
      test.get_temp_dir(), 'temp_base_api_defs')
  subprocess.check_call(
      [os.path.join(resource_loader.get_root_dir_with_all_resources(),
                    _CONVERT_FROM_MULTILINE_SCRIPT),
       _BASE_API_DIR, converted_base_api_dir])

  name_to_base_api_def = {}
  base_api_files = file_io.get_matching_files(
      os.path.join(converted_base_api_dir, 'api_def_*.pbtxt'))
  for base_api_file in base_api_files:
    if file_io.file_exists(base_api_file):
      api_defs = api_def_pb2.ApiDefs()
      text_format.Merge(
          file_io.read_file_to_string(base_api_file), api_defs)
      for api_def in api_defs.op:
        name_to_base_api_def[api_def.graph_op_name] = api_def
  return name_to_base_api_def