This article collects typical usage examples of the Python method utils.mkdir. If you have been wondering what exactly utils.mkdir does, how it is called, and what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing utils module.
The following 14 code examples of utils.mkdir are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
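Note that utils in these examples is a project-local helper module rather than anything from the standard library, so each project ships its own implementation. As a minimal sketch of the common pattern, assuming only that mkdir wraps os.makedirs and returns the path (Examples 4, 12, and 13 below rely on that return value), it might look like:

import os

def mkdir(path):
    # Create the directory (and any missing parents); do nothing if it already exists.
    os.makedirs(path, exist_ok=True)
    return path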
Example 1: snapshot
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def snapshot(self, path):
    if not os.path.exists(path):
        if os.name == 'nt':
            os.system('mkdir {}'.format(path.replace('/', '\\')))
        else:
            os.system('mkdir -p {}'.format(path))
    # Save a snapshot every 50 ticks while the network is in a stabilization phase.
    ndis = 'dis_R{}_T{}.pth.tar'.format(int(floor(self.resl)), self.globalTick)
    ngen = 'gen_R{}_T{}.pth.tar'.format(int(floor(self.resl)), self.globalTick)
    if self.globalTick % 50 == 0:
        if self.phase == 'gstab' or self.phase == 'dstab' or self.phase == 'final':
            save_path = os.path.join(path, ndis)
            if not os.path.exists(save_path):
                torch.save(self.get_state('dis'), save_path)
                save_path = os.path.join(path, ngen)
                torch.save(self.get_state('gen'), save_path)
                print('[snapshot] model saved @ {}'.format(path))
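Incidentally, the platform check and os.system shell calls at the top of this example can be replaced by one portable standard-library call that avoids spawning a shell; a drop-in equivalent on Python 3.2+ would be:

# Creates path and any missing parents; no error if the directory already exists.
os.makedirs(path, exist_ok=True)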
Example 2: save_results
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def save_results(state, steps, visualize=True, subfolder=''):
    if not state.get_output_flag():
        logging.warning('Skip saving results because output_flag is False')
        return
    expr_dir = os.path.join(state.get_save_directory(), subfolder)
    utils.mkdir(expr_dir)
    save_data_path = os.path.join(expr_dir, 'results.pth')
    steps = [(d.detach().cpu(), l.detach().cpu(), lr) for (d, l, lr) in steps]
    if visualize:
        vis_results(state, steps, expr_dir)
    torch.save(steps, save_data_path)
    logging.info('Results saved to {}'.format(save_data_path))
Example 3: save_test_results
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def save_test_results(state, results):
    assert state.phase != 'train'
    if not state.get_output_flag():
        logging.warning('Skip saving test results because output_flag is False')
        return
    test_dir = state.get_save_directory()
    utils.mkdir(test_dir)
    result_file = os.path.join(test_dir, 'results.pth')
    torch.save(results, result_file)
    logging.info('Test results saved as {}'.format(result_file))
Example 4: test_workteamjob
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def test_workteamjob(
    kfp_client, experiment_id, region, sagemaker_client, test_file_dir
):
    download_dir = utils.mkdir(os.path.join(test_file_dir, "generated"))
    workteam_name, workflow_json = create_workteamjob(
        kfp_client, experiment_id, region, sagemaker_client, test_file_dir, download_dir
    )

    outputs = {"sagemaker-private-workforce": ["workteam_arn"]}

    try:
        output_files = minio_utils.artifact_download_iterator(
            workflow_json, outputs, download_dir
        )

        response = sagemaker_utils.describe_workteam(sagemaker_client, workteam_name)

        # Verify the WorkTeam was created in SageMaker
        assert response["Workteam"]["CreateDate"] is not None
        assert response["Workteam"]["WorkteamName"] == workteam_name

        # Verify the WorkTeam ARN artifact was created in Minio and matches the one in SageMaker
        workteam_arn = utils.read_from_file_in_tar(
            output_files["sagemaker-private-workforce"]["workteam_arn"],
            "workteam_arn.txt",
        )
        assert response["Workteam"]["WorkteamArn"] == workteam_arn
    finally:
        # Clean up the SageMaker resources
        sagemaker_utils.delete_workteam(sagemaker_client, workteam_name)

    # Delete generated files only if the test is successful
    utils.remove_dir(download_dir)
Example 5: generate_frames
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def generate_frames(loc, start_idx, end_idx):
    # Extract frames from each video clip.
    # loc       | the directory containing the video clips (e.g. 'clip_video_train')
    # start_idx | the starting index of the training sample
    # end_idx   | the ending index of the training sample
    utils.mkdir('frames')
    for i in range(start_idx, end_idx):
        command = 'cd %s;' % loc
        f_name = str(i)
        command += 'ffmpeg -i %s.mp4 -y -f image2 -vframes 75 ../frames/%s-%%02d.jpg' % (f_name, f_name)
        os.system(command)
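Because the ffmpeg output path ../frames is relative to loc while utils.mkdir('frames') runs in the current working directory, loc needs to be a direct subdirectory of the working directory. A hypothetical invocation, assuming clips named 0.mp4, 1.mp4, ... inside a clip_video_train subdirectory (the name comes from the original comment):

# Extract up to 75 frames from each of clips 0.mp4 ... 99.mp4 into frames/.
generate_frames('clip_video_train', 0, 100)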
Example 6: download_video_frames
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def download_video_frames(loc, d_csv, start_idx, end_idx, rm_video):
    # Download each video and convert it to frames immediately; optionally remove the video file.
    # loc       | the location for the downloaded files
    # d_csv     | the catalog (DataFrame) with the video link and start time
    # start_idx | the starting index of the video to download
    # end_idx   | the ending index of the video to download
    # rm_video  | boolean flag to delete the video and keep only the frames
    utils.mkdir('frames')
    for i in range(start_idx, end_idx):
        command = 'cd %s;' % loc
        f_name = str(i)
        link = "https://www.youtube.com/watch?v=" + d_csv.loc[i][0]
        start_time = d_csv.loc[i][1]
        #start_time = 90
        start_time = time.strftime("%H:%M:%S.0", time.gmtime(start_time))
        command += 'youtube-dl --prefer-ffmpeg -f "mp4" -o o' + f_name + '.mp4 ' + link + ';'
        command += 'ffmpeg -i o'+f_name+'.mp4'+' -c:v h264 -c:a copy -ss '+str(start_time)+' -t '+"3 "+f_name+'.mp4;'
        command += 'rm o%s.mp4;' % f_name
        #command += 'ffmpeg -i %s.mp4 -r 25 %s.mp4;' % (f_name, 'clip_' + f_name)  # convert fps to 25
        #command += 'rm %s.mp4;' % f_name
        # Convert to frames.
        #command += 'ffmpeg -i %s.mp4 -y -f image2 -vframes 75 ../frames/%s-%%02d.jpg;' % (f_name, f_name)
        command += 'ffmpeg -i %s.mp4 -vf fps=25 ../frames/%s-%%02d.jpg;' % (f_name, f_name)
        #command += 'ffmpeg -i %s.mp4 ../frames/%sfr_%%02d.jpg;' % ('clip_' + f_name, f_name)
        if rm_video:
            command += 'rm %s.mp4;' % f_name
        os.system(command)
        print("\r Processing video... " + str(i), end="")
    print("\r Finished !!", end="")
Example 7: __init__
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def __init__(self):
    utils.mkdir('repo/tensorboard')
    for i in range(1000):
        self.targ = 'repo/tensorboard/try_{}'.format(i)
        if not os.path.exists(self.targ):
            self.writer = SummaryWriter(self.targ)
            break
Example 8: download
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def download(
    url_entry,
    scraper=args.scraper,
    save_uncompressed=args.save_uncompressed,
    memoize=args.scraper_memoize,
):
    uid, url = url_entry
    url = url.strip()
    fid = "{:07d}-{}".format(uid, md5(url.encode()).hexdigest())

    # is_good_link, link_type = vet_link(url)
    # if not is_good_link:
    #     return

    if scraper == "bs4":
        scrape = bs4_scraper
    elif scraper == "newspaper":
        scrape = newspaper_scraper
    elif scraper == "raw":
        scrape = raw_scraper

    text, meta = scrape(url, memoize)
    if text is None or text.strip() == "":
        return ("", "", fid, uid)

    if save_uncompressed:
        month = extract_month(args.url_file)
        data_dir = mkdir(op.join(args.output_dir, "data", month))
        meta_dir = mkdir(op.join(args.output_dir, "meta", month))
        text_fp = op.join(data_dir, "{}.txt".format(fid))
        meta_fp = op.join(meta_dir, "{}.json".format(fid))

        with open(text_fp, "w") as out:
            out.write(text)
        with open(meta_fp, "w") as out:
            json.dump(meta, out)

    return (text, meta, fid, uid)
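Because download returns a (text, meta, fid, uid) tuple even on failure, it is straightforward to drive from a process pool. A sketch of such a driver, where url_entries is a hypothetical list of (uid, url) pairs:

from multiprocessing import Pool

url_entries = [(0, "https://example.com/a"), (1, "https://example.com/b")]
with Pool(processes=4) as pool:
    results = pool.map(download, url_entries)
# Keep only entries whose scrape produced non-empty text.
results = [r for r in results if r[0]]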
Example 9: archive_chunk
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def archive_chunk(month, cid, cdata, out_dir, fmt):
    mkdir(out_dir)
    texts, metas, fids, uids = zip(*cdata)

    data_tar = op.join(out_dir, "{}-{}_data.{}".format(month, cid, fmt))
    meta_tar = op.join(out_dir, "{}-{}_meta.{}".format(month, cid, fmt))
    tar_fps, texts, exts = [data_tar, meta_tar], [texts, metas], ["txt", "json"]

    doc_count = 0
    docs_counted = False
    for tar_fp, txts, ext in zip(tar_fps, texts, exts):
        with tarfile.open(tar_fp, "w:" + fmt) as tar:
            for f, fid in zip(txts, fids):
                if f == "":
                    continue
                # Only count documents once, on the first (data) pass.
                if not docs_counted:
                    doc_count += 1
                if ext == "json":
                    f = json.dumps(f)
                f = f.encode("utf-8")
                t = tarfile.TarInfo("{}.{}".format(fid, ext))
                t.size = len(f)
                tar.addfile(t, io.BytesIO(f))
        docs_counted = True
    return doc_count


#######################################################################
#                           Util functions                            #
#######################################################################
Example 10: get_state
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def get_state(month, out_dir):
    mkdir("state")
    latest_cid = 0
    completed_uids = set()
    state_fp = op.join("state", "{}.txt".format(month))
    if op.isfile(state_fp):
        archives = glob(op.join(out_dir, "{}-*".format(month)))
        latest_cid = max([int(a.split("-")[-1].split("_")[0]) for a in archives])
        with open(state_fp, "r") as fh:
            completed_uids = set(int(i.strip()) for i in list(fh))
    return completed_uids, state_fp, latest_cid
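get_state pairs with a state file that records one completed uid per line (matching the int(i.strip()) parsing above). A hypothetical resume loop built on it, with month, out_dir, and url_entries assumed to come from the surrounding pipeline:

completed_uids, state_fp, latest_cid = get_state(month, out_dir)
with open(state_fp, "a") as fh:
    for uid, url in url_entries:
        if uid in completed_uids:
            continue  # already processed in a previous run
        download((uid, url))
        fh.write("{}\n".format(uid))  # record completion for future resumes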
Example 11: _vis_results_fn
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def _vis_results_fn(np_steps, distilled_images_per_class_per_step, dataset_info, arch, dpi,
                    vis_dir=None, vis_name_fmt='visuals_step{step:03d}',
                    cmap=None, supertitle=True, subtitle=True, fontsize=None,
                    reuse_axes=True):
    if vis_dir is None:
        logging.warning('Not saving because vis_dir is not given')
    else:
        vis_name_fmt += '.png'
        utils.mkdir(vis_dir)

    dataset, nc, input_size, mean, std, label_names = dataset_info
    N = len(np_steps[0][0])
    nrows = max(2, distilled_images_per_class_per_step)
    grid = (nrows, np.ceil(N / float(nrows)).astype(int))
    plt.rcParams["figure.figsize"] = (grid[1] * 1.5 + 1, nrows * 1.5 + 1)

    plt.close('all')
    fig, axes = plt.subplots(nrows=grid[0], ncols=grid[1])
    axes = axes.flatten()
    if supertitle:
        fmts = [
            'Dataset: {dataset}',
            'Arch: {arch}',
        ]
        if len(np_steps) > 1:
            fmts.append('Step: {{step}}')
        if np_steps[0][-1] is not None:
            fmts.append('LR: {{lr:.4f}}')
        supertitle_fmt = ', '.join(fmts).format(dataset=dataset, arch=arch)

    plt_images = []
    first_run = True
    for i, (data, labels, lr) in enumerate(np_steps):
        for n, (img, label, axis) in enumerate(zip(data, labels, axes)):
            if nc == 1:
                img = img[..., 0]
            img = (img * std + mean).clip(0, 1)
            if first_run:
                plt_images.append(axis.imshow(img, interpolation='nearest', cmap=cmap))
            else:
                plt_images[n].set_data(img)
            if first_run:
                axis.axis('off')
                if subtitle:
                    axis.set_title('Label {}'.format(label_names[label]), fontsize=fontsize)
        if supertitle:
            if lr is not None:
                lr = lr.sum().item()
            plt.suptitle(supertitle_fmt.format(step=i, lr=lr), fontsize=fontsize)
            if first_run:
                plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0, rect=[0, 0, 1, 0.95])
        fig.canvas.draw()
        if vis_dir is not None:
            plt.savefig(os.path.join(vis_dir, vis_name_fmt.format(step=i)), dpi=dpi)
        if reuse_axes:
            first_run = False
        else:
            fig, axes = plt.subplots(nrows=grid[0], ncols=grid[1])
            axes = axes.flatten()
    plt.show()
Example 12: test_trainingjob
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def test_trainingjob(
    kfp_client, experiment_id, region, sagemaker_client, test_file_dir
):
    download_dir = utils.mkdir(os.path.join(test_file_dir, "generated"))
    test_params = utils.load_params(
        utils.replace_placeholders(
            os.path.join(test_file_dir, "config.yaml"),
            os.path.join(download_dir, "config.yaml"),
        )
    )

    _, _, workflow_json = kfp_client_utils.compile_run_monitor_pipeline(
        kfp_client,
        experiment_id,
        test_params["PipelineDefinition"],
        test_params["Arguments"],
        download_dir,
        test_params["TestName"],
        test_params["Timeout"],
    )

    outputs = {
        "sagemaker-training-job": ["job_name", "model_artifact_url", "training_image"]
    }
    output_files = minio_utils.artifact_download_iterator(
        workflow_json, outputs, download_dir
    )

    # Verify the training job succeeded on SageMaker
    training_job_name = utils.read_from_file_in_tar(
        output_files["sagemaker-training-job"]["job_name"], "job_name.txt"
    )
    print(f"training job name: {training_job_name}")
    train_response = sagemaker_utils.describe_training_job(
        sagemaker_client, training_job_name
    )
    assert train_response["TrainingJobStatus"] == "Completed"

    # Verify the model artifact output was generated by this run
    model_artifact_url = utils.read_from_file_in_tar(
        output_files["sagemaker-training-job"]["model_artifact_url"],
        "model_artifact_url.txt",
    )
    print(f"model_artifact_url: {model_artifact_url}")
    assert model_artifact_url == train_response["ModelArtifacts"]["S3ModelArtifacts"]
    assert training_job_name in model_artifact_url

    # Verify the training image output is an ECR image
    training_image = utils.read_from_file_in_tar(
        output_files["sagemaker-training-job"]["training_image"],
        "training_image.txt",
    )
    print(f"Training image used: {training_image}")
    if "ExpectedTrainingImage" in test_params.keys():
        assert test_params["ExpectedTrainingImage"] == training_image
    else:
        assert f"dkr.ecr.{region}.amazonaws.com" in training_image

    utils.remove_dir(download_dir)
Example 13: test_createmodel
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def test_createmodel(kfp_client, experiment_id, sagemaker_client, test_file_dir):
    download_dir = utils.mkdir(os.path.join(test_file_dir, "generated"))
    test_params = utils.load_params(
        utils.replace_placeholders(
            os.path.join(test_file_dir, "config.yaml"),
            os.path.join(download_dir, "config.yaml"),
        )
    )

    # Generate a random prefix for the model name to avoid collisions with existing models
    test_params["Arguments"]["model_name"] = input_model_name = (
        utils.generate_random_string(5) + "-" + test_params["Arguments"]["model_name"]
    )
    print(f"running test with model_name: {input_model_name}")

    _, _, workflow_json = kfp_client_utils.compile_run_monitor_pipeline(
        kfp_client,
        experiment_id,
        test_params["PipelineDefinition"],
        test_params["Arguments"],
        download_dir,
        test_params["TestName"],
        test_params["Timeout"],
    )

    outputs = {"sagemaker-create-model": ["model_name"]}
    output_files = minio_utils.artifact_download_iterator(
        workflow_json, outputs, download_dir
    )

    output_model_name = utils.read_from_file_in_tar(
        output_files["sagemaker-create-model"]["model_name"], "model_name.txt"
    )
    print(f"model_name: {output_model_name}")
    assert output_model_name == input_model_name
    assert (
        sagemaker_utils.describe_model(sagemaker_client, input_model_name) is not None
    )

    utils.remove_dir(download_dir)
Example 14: validate
# Required module: import utils [as alias]
# Or: from utils import mkdir [as alias]
def validate(opts, model, loader, device, metrics, ret_samples_ids=None):
    """Do validation and return specified samples"""
    metrics.reset()
    ret_samples = []
    if opts.save_val_results:
        if not os.path.exists('results'):
            os.mkdir('results')
        denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
                                   std=[0.229, 0.224, 0.225])
        img_id = 0

    with torch.no_grad():
        for i, (images, labels) in tqdm(enumerate(loader)):
            images = images.to(device, dtype=torch.float32)
            labels = labels.to(device, dtype=torch.long)

            outputs = model(images)
            preds = outputs.detach().max(dim=1)[1].cpu().numpy()
            targets = labels.cpu().numpy()

            metrics.update(targets, preds)
            if ret_samples_ids is not None and i in ret_samples_ids:  # get vis samples
                ret_samples.append(
                    (images[0].detach().cpu().numpy(), targets[0], preds[0]))

            if opts.save_val_results:
                # Use j for the in-batch index to avoid shadowing the loader index i above.
                for j in range(len(images)):
                    image = images[j].detach().cpu().numpy()
                    target = targets[j]
                    pred = preds[j]

                    image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)
                    target = loader.dataset.decode_target(target).astype(np.uint8)
                    pred = loader.dataset.decode_target(pred).astype(np.uint8)

                    Image.fromarray(image).save('results/%d_image.png' % img_id)
                    Image.fromarray(target).save('results/%d_target.png' % img_id)
                    Image.fromarray(pred).save('results/%d_pred.png' % img_id)

                    fig = plt.figure()
                    plt.imshow(image)
                    plt.axis('off')
                    plt.imshow(pred, alpha=0.7)
                    ax = plt.gca()
                    ax.xaxis.set_major_locator(matplotlib.ticker.NullLocator())
                    ax.yaxis.set_major_locator(matplotlib.ticker.NullLocator())
                    plt.savefig('results/%d_overlay.png' % img_id,
                                bbox_inches='tight', pad_inches=0)
                    plt.close()
                    img_id += 1

        score = metrics.get_results()
    return score, ret_samples