本文整理匯總了Python中data.get_dataset方法的典型用法代碼示例。如果您正苦於以下問題:Python data.get_dataset方法的具體用法?Python data.get_dataset怎麽用?Python data.get_dataset使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類data
的用法示例。
在下文中一共展示了data.get_dataset方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_get_dataset_raw
# 需要導入模塊: import data [as 別名]
# 或者: from data import get_dataset [as 別名]
def test_get_dataset_raw(self):
    """get_dataset on a list of PNG paths should yield the decoded images in file order."""
    with self.test_session():
        # Deterministic 4x4x3 test image; encode and write it as a real PNG file.
        test_image1 = tf.constant(np.arange(4 * 4 * 3), shape=[4, 4, 3], dtype=tf.uint8)
        with open(os.path.join("test_files", "test1.png"), "wb") as f:
            f.write(tf.image.encode_png(test_image1).eval())
        # Second image is the first reversed, so any ordering mix-up is detectable.
        test_image2 = tf.constant(np.flip(np.arange(4 * 4 * 3), axis=0),
                                  shape=[4, 4, 3], dtype=tf.uint8)
        with open(os.path.join("test_files", "test2.png"), "wb") as f:
            f.write(tf.image.encode_png(test_image2).eval())
        files = glob.glob(os.path.join("test_files", "test*.png"))
        dataset = get_dataset(files)
        it = dataset.make_one_shot_iterator()
        # Iterator must reproduce the original pixel data, image by image.
        self.assertAllClose(it.get_next(), test_image1)
        self.assertAllClose(it.get_next(), test_image2)
示例2: load_ae
# 需要導入模塊: import data [as 別名]
# 或者: from data import get_dataset [as 別名]
def load_ae(path, target_dataset, batch, all_aes, return_dataset=False):
    """Reconstruct an autoencoder from its checkpoint directory path.

    The path is expected to look like .../<dataset>/<ClassName>_<arg1value1_arg2value2...>;
    hyperparameters are parsed back out of the directory name.

    Args:
        path: checkpoint directory whose last two components encode dataset and AE config.
        target_dataset: dataset name the caller expects; mismatches are only warned about.
        batch: batch size used when re-opening the dataset.
        all_aes: mapping from AE class name to class object.
        return_dataset: if True, return (ae, dataset) instead of (ae, config_folder_name).

    Returns:
        (ae, dataset) when return_dataset is True, else (ae, last path component).
    """
    # Matches "<name><value>" pairs, where value is True/False, an int/float,
    # or a comma-separated list of ints.
    r_param = re.compile(r'(?P<name>[a-zA-Z][a-z_]*)'
                         r'(?P<value>(True)|(False)|(\d+(\.\d+)?(,\d+)*))')
    folders = [x for x in os.path.abspath(path).split('/') if x]
    dataset = folders[-2]
    if dataset != target_dataset:
        tf.logging.log(tf.logging.WARN,
                       'Mismatched datasets between classifier and AE (%s, %s)',
                       target_dataset, dataset)
    class_name, argpairs = folders[-1].split('_', 1)
    params = {}
    for x in r_param.findall(argpairs):
        name, value = x[:2]
        if ',' in value:
            pass  # comma-separated list: keep the raw string value
        elif value in ('True', 'False'):
            # Bug fix: dict(True=True, False=False) is a SyntaxError in Python 3
            # (True/False are keywords and cannot be keyword-argument names).
            value = value == 'True'
        elif '.' in value:
            value = float(value)
        else:
            value = int(value)
        params[name] = value
    class_ = all_aes[class_name]
    dataset = data.get_dataset(dataset, dict(batch_size=batch))
    ae = class_(dataset, '/' + os.path.join(*(folders[:-2])), **params)
    if return_dataset:
        return ae, dataset
    else:
        return ae, folders[-1]
示例3: test_get_dataset_tfrecords
# 需要導入模塊: import data [as 別名]
# 或者: from data import get_dataset [as 別名]
def test_get_dataset_tfrecords(self):
    """get_dataset on a .tfrecords file should yield the stored images in write order."""
    with self.test_session():
        test_image1 = tf.constant(np.arange(4 * 4 * 3), shape=[4, 4, 3], dtype=tf.uint8)
        # Second image is the first reversed, so any ordering mix-up is detectable.
        test_image2 = tf.constant(np.flip(np.arange(4 * 4 * 3), axis=0),
                                  shape=[4, 4, 3], dtype=tf.uint8)

        def make_example(image):
            # Serialize the uint8 pixel buffer plus its shape into a tf.train.Example.
            return tf.train.Example(features=tf.train.Features(feature={
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[image.eval().tobytes()])),
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=[4, 4, 3])),
            }))

        # Context manager guarantees the writer is flushed/closed even if an
        # assertion below fails.
        with tf.python_io.TFRecordWriter(
                os.path.join("test_files", "test.tfrecords")) as writer:
            writer.write(make_example(test_image1).SerializeToString())
            writer.write(make_example(test_image2).SerializeToString())
        files = glob.glob(os.path.join("test_files", "*.tfrecords"))
        dataset = get_dataset(files)
        it = dataset.make_one_shot_iterator()
        self.assertAllClose(it.get_next(), test_image1)
        self.assertAllClose(it.get_next(), test_image2)
示例4: test_preprocess_dataset_batch2_float_raw
# 需要導入模塊: import data [as 別名]
# 或者: from data import get_dataset [as 別名]
def test_preprocess_dataset_batch2_float_raw(self):
    """preprocess_dataset should batch, resize to 64x64 and rescale pixels to [-1, 1].

    NOTE(review): despite the "_raw" suffix this test builds a tfrecords fixture
    (the "_tfrecord" sibling uses PNGs) — the two names look swapped; confirm
    before renaming, since test runners may select by name.
    """
    with self.test_session():
        test_image1 = tf.constant(np.arange(4 * 4 * 3), shape=[4, 4, 3], dtype=tf.uint8)
        test_image2 = tf.constant(np.flip(np.arange(4 * 4 * 3), axis=0),
                                  shape=[4, 4, 3], dtype=tf.uint8)

        def make_example(image):
            # Serialize the uint8 pixel buffer plus its shape into a tf.train.Example.
            return tf.train.Example(features=tf.train.Features(feature={
                'data': tf.train.Feature(bytes_list=tf.train.BytesList(
                    value=[image.eval().tobytes()])),
                'shape': tf.train.Feature(int64_list=tf.train.Int64List(value=[4, 4, 3])),
            }))

        # Context manager guarantees the writer is flushed/closed even on failure.
        with tf.python_io.TFRecordWriter(
                os.path.join("test_files", "test.tfrecords")) as writer:
            writer.write(make_example(test_image1).SerializeToString())
            writer.write(make_example(test_image2).SerializeToString())
        files = glob.glob(os.path.join("test_files", "*.tfrecords"))
        dataset = get_dataset(files)
        dataset = preprocess_dataset(dataset, size=[64, 64], batch_size=2,
                                     float_pixels=True)
        it = dataset.make_one_shot_iterator()
        # Renamed from `data` to avoid shadowing the `data` module under test.
        batch = it.get_next().eval()
        self.assertEqual(batch.shape, (2, 64, 64, 3))
        # float_pixels maps uint8 [0, 255] to [-1, 1] via x / 127.5 - 1.
        self.assertAllClose(batch.max(), test_image1.eval().max() / 127.5 - 1.)
        self.assertAllClose(batch.min(), test_image1.eval().min() / 127.5 - 1.)
示例5: test_preprocess_dataset_batch2_float_tfrecord
# 需要導入模塊: import data [as 別名]
# 或者: from data import get_dataset [as 別名]
def test_preprocess_dataset_batch2_float_tfrecord(self):
    """preprocess_dataset should batch, resize to 64x64 and rescale pixels to [-1, 1].

    NOTE(review): despite the "_tfrecord" suffix this test builds PNG fixtures
    (the "_raw" sibling uses tfrecords) — the two names look swapped; confirm
    before renaming, since test runners may select by name.
    """
    with self.test_session():
        # Scale by 5 so resized/rescaled extrema are distinguishable from zero.
        test_image1 = tf.constant(np.arange(4 * 4 * 3) * 5, shape=[4, 4, 3], dtype=tf.uint8)
        with open(os.path.join("test_files", "test1.png"), "wb") as f:
            f.write(tf.image.encode_png(test_image1).eval())
        test_image2 = tf.constant(np.flip(np.arange(4 * 4 * 3) * 5, axis=0),
                                  shape=[4, 4, 3], dtype=tf.uint8)
        with open(os.path.join("test_files", "test2.png"), "wb") as f:
            f.write(tf.image.encode_png(test_image2).eval())
        files = glob.glob(os.path.join("test_files", "test*.png"))
        dataset = get_dataset(files)
        dataset = preprocess_dataset(dataset, size=[64, 64], batch_size=2,
                                     float_pixels=True)
        it = dataset.make_one_shot_iterator()
        # Renamed from `data` to avoid shadowing the `data` module under test.
        batch = it.get_next().eval()
        self.assertEqual(batch.shape, (2, 64, 64, 3))
        # float_pixels maps uint8 [0, 255] to [-1, 1] via x / 127.5 - 1.
        self.assertAllClose(batch.max(), test_image1.eval().max() / 127.5 - 1.)
        self.assertAllClose(batch.min(), test_image1.eval().min() / 127.5 - 1.)
示例6: build_data_iterator
# 需要導入模塊: import data [as 別名]
# 或者: from data import get_dataset [as 別名]
def build_data_iterator(hps, files, current_res_h, current_res_w, batch_size=None, label_list=None,
                        num_shards=None, shard_index=None):
    """Shuffle the input files and build a one-shot iterator over the dataset.

    Args:
        hps: hyperparameter object; only `epochs_per_res` is read here.
        files: list of input file paths (shuffled in place).
        current_res_h, current_res_w: target resolution for this training stage.
        batch_size: optional batch size forwarded to get_dataset.
        label_list: optional labels forwarded to get_dataset.
        num_shards, shard_index: optional sharding spec forwarded to get_dataset.

    Returns:
        A one-shot iterator over the constructed dataset.
    """
    random.shuffle(files)
    # Bug fix: num_shards/shard_index were previously passed as the literal
    # None instead of forwarding the parameters, silently disabling sharding
    # for every caller that requested it.
    dataset = get_dataset(files, current_res_h, current_res_w, hps.epochs_per_res, batch_size,
                          label_list=label_list, num_shards=num_shards,
                          shard_index=shard_index)
    return dataset.make_one_shot_iterator()
示例7: get_dt
# 需要導入模塊: import data [as 別名]
# 或者: from data import get_dataset [as 別名]
def get_dt(filename, dataset):
    """Load a summary CSV and pair it with the true test labels of `dataset`.

    Args:
        filename: path to a CSV file readable by pandas.
        dataset: dataset name passed to data.get_dataset; only y_test is used.

    Returns:
        The result of preprocess_summary_file on the CSV plus a one-column
        DataFrame of true class indices (argmax over the one-hot y_test).
    """
    dt = pd.read_csv(filename)
    _, _, _, y_test = data.get_dataset(dataset)
    true_index = pd.DataFrame({'TrueIndex': y_test.argmax(1)})
    return preprocess_summary_file(dt, true_index)