This page collects typical usage examples of config.result_dir in Python. Note that result_dir is a module-level setting defined in config.py rather than a method. If you are unsure what config.result_dir is or how it is used in practice, the curated code examples below may help. You can also explore further usage examples of the config module it belongs to.
The following presents 13 code examples that use config.result_dir, sorted by popularity by default.
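Many of the examples below come from GAN training codebases where config.py holds project-wide settings. As a point of reference, here is a minimal, purely illustrative sketch of such a config.py; the fields result_dir, cache_dir, run_dir_ignore, desc, and num_gpus are assumptions inferred from how the examples below use them, not the contents of any particular project's config:

# config.py -- a minimal illustrative sketch, not taken from any specific project.
result_dir = 'results'        # root directory for run subdirectories and generated outputs
cache_dir = 'cache'           # download cache used by examples that fetch pretrained networks
run_dir_ignore = ['results', 'datasets', 'cache']  # assumption: dirs excluded from run bookkeeping
desc = 'example'              # short run description appended to the numeric run ID
num_gpus = 1                  # number of GPUs assumed by Gs.run(..., num_gpus=...)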
Example 1: locate_result_subdir
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def locate_result_subdir(run_id_or_result_subdir):
    if isinstance(run_id_or_result_subdir, str) and os.path.isdir(run_id_or_result_subdir):
        return run_id_or_result_subdir
    searchdirs = []
    searchdirs += ['']
    searchdirs += ['results']
    searchdirs += ['networks']

    for searchdir in searchdirs:
        dir = config.result_dir if searchdir == '' else os.path.join(config.result_dir, searchdir)
        dir = os.path.join(dir, str(run_id_or_result_subdir))
        if os.path.isdir(dir):
            return dir
        prefix = '%03d' % run_id_or_result_subdir if isinstance(run_id_or_result_subdir, int) else str(run_id_or_result_subdir)
        dirs = sorted(glob.glob(os.path.join(config.result_dir, searchdir, prefix + '-*')))
        dirs = [dir for dir in dirs if os.path.isdir(dir)]
        if len(dirs) == 1:
            return dirs[0]
    raise IOError('Cannot locate result subdir for run', run_id_or_result_subdir)
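A brief usage sketch, with a made-up run ID and directory name: given a results tree containing entries such as results/003-pgan-celebahq, the helper accepts either an integer run ID or an existing path.

# Hypothetical calls; the run ID and directory name are illustrative only.
subdir = locate_result_subdir(3)                             # matches <result_dir>/003-*
subdir = locate_result_subdir('results/003-pgan-celebahq')   # an existing path is returned as-is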
Example 2: generate_fake_images
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def generate_fake_images(run_id, snapshot=None, grid_size=[1,1], num_pngs=1, image_shrink=1, png_prefix=None, random_seed=1000, minibatch_size=8):
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if png_prefix is None:
        png_prefix = misc.get_id_string_for_network_pkl(network_pkl) + '-'
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    for png_idx in range(num_pngs):
        print('Generating png %d / %d...' % (png_idx, num_pngs))
        latents = misc.random_latents(np.prod(grid_size), Gs, random_state=random_state)
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
        misc.save_image_grid(images, os.path.join(result_subdir, '%s%06d.png' % (png_prefix, png_idx)), [0,255], grid_size)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()

#----------------------------------------------------------------------------
# Generate MP4 video of random interpolations using a previously trained network.
# To run, uncomment the appropriate line in config.py and launch train.py.
Example 3: locate_result_subdir
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def locate_result_subdir(run_id):
    if isinstance(run_id, str) and os.path.isdir(run_id):
        return run_id
    searchdirs = []
    searchdirs += ['.']
    searchdirs += ['results']
    searchdirs += ['networks']

    import config
    for searchdir in searchdirs:
        dir = os.path.join(config.result_dir, searchdir, str(run_id))
        if os.path.isdir(dir):
            return dir
        dirs = glob.glob(os.path.join(config.result_dir, searchdir, '%s-*' % str(run_id)))
        if len(dirs) == 1 and os.path.isdir(dirs[0]):
            return dirs[0]
    raise IOError('Cannot locate result subdir for run', run_id)
Example 4: create_result_subdir
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def create_result_subdir(result_dir, run_desc):
    # Select run ID and create subdir.
    while True:
        run_id = 0
        for fname in glob.glob(os.path.join(result_dir, '*')):
            try:
                fbase = os.path.basename(fname)
                ford = int(fbase[:fbase.find('-')])
                run_id = max(run_id, ford + 1)
            except ValueError:
                pass

        result_subdir = os.path.join(result_dir, '%03d-%s' % (run_id, run_desc))
        try:
            os.makedirs(result_subdir)
            break
        except OSError:
            if os.path.isdir(result_subdir):
                continue
            raise

    print("Saving results to", result_subdir)
    return result_subdir
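For context, a hedged sketch of a typical call (the description string is made up): each invocation scans result_dir for existing NNN-* entries and creates the next numbered subdirectory.

# Hypothetical usage; successive calls would create e.g. results/000-smoke-test,
# results/001-smoke-test, and so on under config.result_dir.
result_subdir = create_result_subdir(config.result_dir, 'smoke-test')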
Example 5: locate_run_dir
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def locate_run_dir(run_id_or_run_dir):
    if isinstance(run_id_or_run_dir, str):
        if os.path.isdir(run_id_or_run_dir):
            return run_id_or_run_dir
        converted = dnnlib.submission.submit.convert_path(run_id_or_run_dir)
        if os.path.isdir(converted):
            return converted

    run_dir_pattern = re.compile('^0*%s-' % str(run_id_or_run_dir))
    for search_dir in ['']:
        full_search_dir = config.result_dir if search_dir == '' else os.path.normpath(os.path.join(config.result_dir, search_dir))
        run_dir = os.path.join(full_search_dir, str(run_id_or_run_dir))
        if os.path.isdir(run_dir):
            return run_dir
        run_dirs = sorted(glob.glob(os.path.join(full_search_dir, '*')))
        run_dirs = [run_dir for run_dir in run_dirs if run_dir_pattern.match(os.path.basename(run_dir))]
        run_dirs = [run_dir for run_dir in run_dirs if os.path.isdir(run_dir)]
        if len(run_dirs) == 1:
            return run_dirs[0]
    raise IOError('Cannot locate result subdir for run', run_id_or_run_dir)
Example 6: create_result_subdir
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def create_result_subdir(result_dir, desc):
    # Select run ID and create subdir.
    while True:
        run_id = 0
        for fname in glob.glob(os.path.join(result_dir, '*')):
            try:
                fbase = os.path.basename(fname)
                ford = int(fbase[:fbase.find('-')])
                run_id = max(run_id, ford + 1)
            except ValueError:
                pass

        result_subdir = os.path.join(result_dir, '%03d-%s' % (run_id, desc))
        try:
            os.makedirs(result_subdir)
            break
        except OSError:
            if os.path.isdir(result_subdir):
                continue
            raise

    print("Saving results to", result_subdir)
    set_output_log_file(os.path.join(result_subdir, 'log.txt'))

    # Export config.
    try:
        with open(os.path.join(result_subdir, 'config.txt'), 'wt') as fout:
            for k, v in sorted(config.__dict__.items()):
                if not k.startswith('_'):
                    fout.write("%s = %s\n" % (k, str(v)))
    except:
        pass
    return result_subdir
Example 7: generate_interpolation_video
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def generate_interpolation_video(run_id, snapshot=None, grid_size=[1,1], image_shrink=1, image_zoom=1, duration_sec=60.0, smoothing_sec=1.0, mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M', random_seed=1000, minibatch_size=8):
    network_pkl = misc.locate_network_pkl(run_id, snapshot)
    if mp4 is None:
        mp4 = misc.get_id_string_for_network_pkl(network_pkl) + '-lerp.mp4'
    num_frames = int(np.rint(duration_sec * mp4_fps))
    random_state = np.random.RandomState(random_seed)

    print('Loading network from "%s"...' % network_pkl)
    G, D, Gs = misc.load_network_pkl(run_id, snapshot)

    print('Generating latent vectors...')
    shape = [num_frames, np.prod(grid_size)] + Gs.input_shape[1:]  # [frame, image, channel, component]
    all_latents = random_state.randn(*shape).astype(np.float32)
    all_latents = scipy.ndimage.gaussian_filter(all_latents, [smoothing_sec * mp4_fps] + [0] * len(Gs.input_shape), mode='wrap')
    all_latents /= np.sqrt(np.mean(np.square(all_latents)))

    # Frame generation func for moviepy.
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, labels, minibatch_size=minibatch_size, num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_shrink=image_shrink, out_dtype=np.uint8)
        grid = misc.create_image_grid(images, grid_size).transpose(1, 2, 0)  # HWC
        if image_zoom > 1:
            grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2)  # grayscale => RGB
        return grid

    # Generate video. Note: the mp4_codec argument is not used below; the codec is hard-coded to 'libx264'.
    import moviepy.editor  # pip install moviepy
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec='libx264', bitrate=mp4_bitrate)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()

#----------------------------------------------------------------------------
# Generate MP4 video of training progress for a previous training run.
# To run, uncomment the appropriate line in config.py and launch train.py.
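A hedged invocation sketch (the run ID and duration are placeholders): the call creates a new subdirectory under config.result_dir, writes the interpolation MP4 there, and drops a _done.txt marker when finished.

# Hypothetical call; run_id=23 and duration_sec=30.0 are illustrative values.
generate_interpolation_video(run_id=23, duration_sec=30.0, mp4_fps=30)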
Example 8: experiment_tcn
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def experiment_tcn():
    # Popen is assumed to be imported from subprocess at module level.
    from config import result_dir, split_num, tcn_run_num, dataset_name

    if dataset_name in ['JIGSAWS_K', 'JIGSAWS_N']:
        feature_types = ['sensor']
    elif dataset_name == 'GTEA':
        feature_types = ['visual']
    else:
        feature_types = ['sensor', 'visual']

    ####################################################

    for feature_type in feature_types:
        tcn_cmd = 'python3 tcn_main.py --feature_type {}'.format(feature_type)
        Popen(tcn_cmd, shell=True).wait()
        #os.system(tcn_cmd)

        # Get Averaged Results: TCN
        template = 'tcn_result_{}_run_{}.npy'
        tcn_result = np.zeros((tcn_run_num, split_num, 6))
        for tcn_run_idx in range(1, 1 + tcn_run_num):
            run_result_file = template.format(feature_type, tcn_run_idx)
            run_result_file = os.path.join(result_dir, run_result_file)
            tcn_result[tcn_run_idx-1,:,:] = np.load(run_result_file)
            os.remove(run_result_file)

        tcn_result_file = 'tcn_avg_result_{}.npy'.format(feature_type)
        tcn_result_file = os.path.join(result_dir, tcn_result_file)
        #np.save(tcn_result_file, tcn_result.mean(0).mean(0))
        np.save(tcn_result_file, tcn_result)
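This example pulls several other settings from config alongside result_dir. A purely illustrative sketch of the values it assumes (the real project's config.py defines its own):

# config.py -- illustrative values only; the names match the import in the example above.
result_dir = './result'      # where tcn_main.py is assumed to write its per-run .npy files
dataset_name = 'JIGSAWS_K'   # selects which feature types get evaluated
split_num = 8                # number of cross-validation splits per run
tcn_run_num = 5              # number of repeated TCN runs to average over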
Example 9: __init__
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
    import config
    globals()['MODEL_DIR'] = os.path.join(config.result_dir, '_inception')
    self.sess = tf.get_default_session()
    _init_inception()
Example 10: __init__
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def __init__(self, num_images, image_shape, image_dtype, minibatch_size):
    import config
    self.network_dir = os.path.join(config.result_dir, '_inception_fid')
    self.network_file = check_or_download_inception(self.network_dir)
    self.sess = tf.get_default_session()
    create_inception_graph(self.network_file)
Example 11: main
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def main():
    tflib.init_tf()
    os.makedirs(config.result_dir, exist_ok=True)
    draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure02-uncurated-ffhq.png'), load_Gs(url_ffhq), cx=0, cy=0, cw=1024, ch=1024, rows=3, lods=[0,1,2,2,3,3], seed=5)
    draw_style_mixing_figure(os.path.join(config.result_dir, 'figure03-style-mixing.png'), load_Gs(url_ffhq), w=1024, h=1024, src_seeds=[639,701,687,615,2268], dst_seeds=[888,829,1898,1733,1614,845], style_ranges=[range(0,4)]*3+[range(4,8)]*2+[range(8,18)])
    draw_noise_detail_figure(os.path.join(config.result_dir, 'figure04-noise-detail.png'), load_Gs(url_ffhq), w=1024, h=1024, num_samples=100, seeds=[1157,1012])
    draw_noise_components_figure(os.path.join(config.result_dir, 'figure05-noise-components.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[1967,1555], noise_ranges=[range(0, 18), range(0, 0), range(8, 18), range(0, 8)], flips=[1])
    draw_truncation_trick_figure(os.path.join(config.result_dir, 'figure08-truncation-trick.png'), load_Gs(url_ffhq), w=1024, h=1024, seeds=[91,388], psis=[1, 0.7, 0.5, 0, -0.5, -1])
    draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure10-uncurated-bedrooms.png'), load_Gs(url_bedrooms), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=0)
    draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure11-uncurated-cars.png'), load_Gs(url_cars), cx=0, cy=64, cw=512, ch=384, rows=4, lods=[0,1,2,2,3,3], seed=2)
    draw_uncurated_result_figure(os.path.join(config.result_dir, 'figure12-uncurated-cats.png'), load_Gs(url_cats), cx=0, cy=0, cw=256, ch=256, rows=5, lods=[0,0,1,1,2,2,2], seed=1)

#----------------------------------------------------------------------------
Example 12: main
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def main():
    submit_config = dnnlib.SubmitConfig()

    # Which metrics to evaluate?
    metrics = []
    metrics += [metric_base.fid50k]
    #metrics += [metric_base.ppl_zfull]
    #metrics += [metric_base.ppl_wfull]
    #metrics += [metric_base.ppl_zend]
    #metrics += [metric_base.ppl_wend]
    #metrics += [metric_base.ls]
    #metrics += [metric_base.dummy]

    # Which networks to evaluate them on?
    tasks = []
    tasks += [EasyDict(run_func_name='run_metrics.run_pickle', network_pkl='https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ', dataset_args=EasyDict(tfrecord_dir='ffhq', shuffle_mb=0), mirror_augment=True)]  # karras2019stylegan-ffhq-1024x1024.pkl
    #tasks += [EasyDict(run_func_name='run_metrics.run_snapshot', run_id=100, snapshot=25000)]
    #tasks += [EasyDict(run_func_name='run_metrics.run_all_snapshots', run_id=100)]

    # How many GPUs to use?
    submit_config.num_gpus = 1
    #submit_config.num_gpus = 2
    #submit_config.num_gpus = 4
    #submit_config.num_gpus = 8

    # Execute.
    submit_config.run_dir_root = dnnlib.submission.submit.get_template_from_path(config.result_dir)
    submit_config.run_dir_ignore += config.run_dir_ignore
    for task in tasks:
        for metric in metrics:
            submit_config.run_desc = '%s-%s' % (task.run_func_name, metric.name)
            if task.run_func_name.endswith('run_snapshot'):
                submit_config.run_desc += '-%s-%s' % (task.run_id, task.snapshot)
            if task.run_func_name.endswith('run_all_snapshots'):
                submit_config.run_desc += '-%s' % task.run_id
            submit_config.run_desc += '-%dgpu' % submit_config.num_gpus
            dnnlib.submit_run(submit_config, metric_args=metric, **task)

#----------------------------------------------------------------------------
Example 13: main
# Required import: import config [as alias]
# Or: from config import result_dir [as alias]
def main():
    # Initialize TensorFlow.
    tflib.init_tf()

    # Load pre-trained network.
    url = 'https://drive.google.com/uc?id=1MEGjdvVpUsu1jB4zrXZN7Y4kBBOzizDQ'  # karras2019stylegan-ffhq-1024x1024.pkl
    with dnnlib.util.open_url(url, cache_dir=config.cache_dir) as f:
        _G, _D, Gs = pickle.load(f)
        # _G = Instantaneous snapshot of the generator. Mainly useful for resuming a previous training run.
        # _D = Instantaneous snapshot of the discriminator. Mainly useful for resuming a previous training run.
        # Gs = Long-term average of the generator. Yields higher-quality results than the instantaneous snapshot.

    # Print network details.
    Gs.print_layers()

    # Pick latent vector.
    rnd = np.random.RandomState(5)
    latents = rnd.randn(1, Gs.input_shape[1])

    # Generate image.
    fmt = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    images = Gs.run(latents, None, truncation_psi=0.7, randomize_noise=True, output_transform=fmt)

    # Save image.
    os.makedirs(config.result_dir, exist_ok=True)
    png_filename = os.path.join(config.result_dir, 'example.png')
    PIL.Image.fromarray(images[0], 'RGB').save(png_filename)