本文整理汇总了Python中utils.mkdir_p方法的典型用法代码示例。如果您正苦于以下问题:Python utils.mkdir_p方法的具体用法?Python utils.mkdir_p怎么用?Python utils.mkdir_p使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块utils
的用法示例。
在下文中一共展示了utils.mkdir_p方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_skeleton
# 需要导入模块: import utils [as 别名]
# 或者: from utils import mkdir_p [as 别名]
def create_skeleton(self):
    """
    Create the role's directory and file structure.

    Writes a VERSION file at the output root, then creates every folder
    in c.ANSIBLE_FOLDERS.  Each folder gets a main.yml rendered from
    default_mainyml_template, except "templates", "meta", "tests" and
    "files"; the "meta" folder instead receives a generated meta main
    via utils.create_meta_main.
    """
    version_file = os.path.join(self.output_path, "VERSION")
    utils.string_to_file(version_file, "master\n")

    # Folders that must not receive the generic main.yml.
    no_mainyml = ("templates", "meta", "tests", "files")
    for folder in c.ANSIBLE_FOLDERS:
        folder_path = os.path.join(self.output_path, folder)
        utils.mkdir_p(folder_path)

        # Render the template for this folder.
        rendered = default_mainyml_template.replace("%role_name",
                                                    self.role_name)
        rendered = rendered.replace("%values", folder)

        main_path = os.path.join(folder_path, "main.yml")
        if folder not in no_mainyml:
            utils.string_to_file(main_path, rendered)
        if folder == "meta":
            utils.create_meta_main(main_path,
                                   self.config, self.role_name,
                                   self.options.galaxy_categories)
示例2: run_epicFlow_sequence
# 需要导入模块: import utils [as 别名]
# 或者: from utils import mkdir_p [as 别名]
def run_epicFlow_sequence(imPathList, flowdir, deepmatchdir, vizFlow=False):
    """
    Run EpicFlow Code on a sequence of images of video to obtain tracks.
    To be run after epic flow code.

    Args:
        imPathList: ordered list of image file paths for the sequence.
        flowdir: directory to read/write '*.flo' flow files.
        deepmatchdir: directory containing 'match_%04d.txt' deepmatch files.
        vizFlow: if True, also write flow visualizations and tar them up.

    Skips computation entirely when flowdir already holds one flow file
    per consecutive image pair.
    """
    fList = []
    if os.path.isdir(flowdir):
        fList = utils.read_r(flowdir, '*.flo')
    if not len(fList) == len(imPathList) - 1 or len(fList) == 0:
        utils.mkdir_p(flowdir)
        if vizFlow:
            utils.mkdir_p(flowdir + '/viz_flow/')
        for i in range(len(imPathList) - 1):
            deepmatchfile = deepmatchdir + 'match_%04d.txt' % i
            run_epicFlow_pair(
                imPathList[i], imPathList[i + 1], flowdir, deepmatchfile, i,
                vizFlow)
            # BUG FIX: the original computed float(i / len(imPathList)),
            # which truncates to 0 under Python 2 integer division, so the
            # progress indicator was stuck at 0.0%.  Multiply by 100.0
            # first to force true division on both Python 2 and 3.
            sys.stdout.write('Pairwise EpicFlow: [% 5.1f%%]\r' %
                             (100.0 * i / len(imPathList)))
            sys.stdout.flush()
        if vizFlow:
            import subprocess
            subprocess.call(['tar', '-zcf', flowdir + '/viz_flow.tar.gz', '-C',
                             flowdir + '/viz_flow', '.'])
        print('Pairwise EpicFlow completed.')
    else:
        print('Pairwise EpicFlows already present in outdir. Using them.')
示例3: run_pre_homography
# 需要导入模块: import utils [as 别名]
# 或者: from utils import mkdir_p [as 别名]
def run_pre_homography(outdir, matchdir, homTh, imH, imW, dmThresh,
                       imPathList, vizHomo):
    """
    Run per frame homography on matching files of deepmatch or epic flow.

    Args:
        outdir: output directory for filtered match files (recreated fresh).
        matchdir: directory containing '*.txt' match files to filter.
        homTh: homography threshold; a negative value disables this stage.
        imH, imW: image height and width, used to clamp match coordinates.
        dmThresh: deepmatch score threshold passed to read_dmOutput.
        imPathList: image paths, used only for visualization overlays.
        vizHomo: if True, save per-frame visualizations and tar them up.
    """
    if homTh < 0:
        return
    utils.rmdir_f(outdir)
    utils.mkdir_p(outdir)
    mList = utils.read_r(matchdir, '*.txt')
    if vizHomo:
        col = np.array([255, 0, 0], dtype='int')
        utils.mkdir_p(outdir + '/viz_homo/')
    for i in range(len(mList)):
        matches = read_dmOutput(mList[i], imH, imW, dmThresh, False)
        matches = frame_homography(matches, homTh)
        # fit to coordinates to image size
        matches = np.minimum(matches, np.array([imW, imH, imW, imH]) - 1)
        matches = np.maximum(matches, np.array([0]))
        matchfile = outdir + 'match_%04d.txt' % i
        np.savetxt(matchfile, matches, fmt='%d')
        if matches.size > 0 and vizHomo:
            im = np.array(Image.open(imPathList[i]))
            im = Image.fromarray(utils.draw_point_im(
                im, matches[:, [1, 0]], col, sizeOut=10))
            im.save(outdir + '/viz_homo/%s' % (imPathList[i].split('/')[-1]))
        # BUG FIX: float(i / len(mList)) truncated to 0 under Python 2
        # integer division, freezing the progress display at 0.0%.
        # Also fixed the misspelling "homogrpahy" in the message.
        sys.stdout.write('Pairwise pre-tracking homography: [% 5.1f%%]\r' %
                         (100.0 * i / len(mList)))
        sys.stdout.flush()
    # BUG FIX: the tar step previously ran unconditionally; when vizHomo
    # is False the viz_homo directory does not exist and tar failed.
    if vizHomo:
        import subprocess
        subprocess.call(['tar', '-zcf', outdir + '/viz_homo.tar.gz', '-C',
                         outdir + '/viz_homo', '.'])
    print('Pairwise pre-tracking homography completed.')
示例4: demo_videos
# 需要导入模块: import utils [as 别名]
# 或者: from utils import mkdir_p [as 别名]
def demo_videos():
    """
    Demo of dm_tracker on imagenet videos.

    Reads every '*.mp4' under a hard-coded video directory, expects a
    sibling '<video>_im/<name>' directory of extracted '*.jpg' frames,
    and runs the full dm tracking pipeline on each sequence, writing
    results to a parallel '*_result' tree.
    """
    args = parse_args()
    np.random.seed(args.seed)
    vidDir = '/home/dpathak/local/data/trash/videos'
    imDir = vidDir + '_im'
    vidPathList = utils.read_r(vidDir, '*.mp4')
    # vidPathList = vidPathList[5:]
    utils.mkdir_p(imDir)
    for vidPath in vidPathList:
        print('Video: ', vidPath)
        # Per-video frame directory: strip '.mp4' from the basename.
        frameDir = imDir + '/' + vidPath.split('/')[-1][:-4]
        utils.mkdir_p(frameDir)
        # Frame extraction kept disabled, as in the original demo:
        # imSeq = utils.vid2im(vidPath)
        # assert imSeq.size > 0, "Error reading video file"
        # for j in range(imSeq.shape[0]):
        #     Image.fromarray(imSeq[j]).save(frameDir + '/frame_%05d.jpg' % j)
        imPathList = utils.read_r(frameDir, '*.jpg')
        if len(imPathList) < 2:
            print('Not enough images in image directory: \n%s' % frameDir)
            print('Continuing to next one ...')
            continue
        # Mirror the frame directory path into a '*_result' tree.
        parts = frameDir.split('/')
        parts[-2] = parts[-2][:-3] + '_result'
        outdir = '/'.join(parts)
        run_dm_sequence(outdir, imPathList, args.frameGap, args.dmThresh,
                        args.matchNbr, args.shotFrac, args.postTrackHomTh,
                        args.preTrackHomTh, args.use_epic,
                        args.vizFlow, args.vizTr, args.cpysrc)
示例5: create_test_case
# 需要导入模块: import utils [as 别名]
# 或者: from utils import mkdir_p [as 别名]
def create_test_case(self):
    """
    Create a test case.

    Renders the test playbook template with role/author metadata from
    self.config, builds a tests/inventory skeleton (group_vars,
    host_vars, a placeholder hosts file), and writes an executable
    tests/test script.
    """
    testyml_template = default_testyml_template.replace(
        "%role", self.normalized_role)
    testyml_template = testyml_template.replace(
        "%year", str(date.today().year))
    testyml_template = testyml_template.replace(
        "%author", self.config["author_name"])
    testyml_template = testyml_template.replace(
        "%email", self.config["author_email"])
    utils.mkdir_p(os.path.join(self.output_path,
                               "tests", "inventory", "group_vars"))
    utils.mkdir_p(os.path.join(self.output_path,
                               "tests", "inventory", "host_vars"))
    hosts = "placeholder_fqdn\n"
    utils.string_to_file(os.path.join(self.output_path,
                                      "tests", "inventory", "hosts"),
                         hosts)
    test_file = os.path.join(self.output_path, "tests", "test")
    utils.string_to_file(test_file, testyml_template)
    # BUG FIX: the Python-2-only octal literal 0755 is a syntax error on
    # Python 3; 0o755 has the same value and is valid on Python 2.6+.
    os.chmod(test_file, 0o755)
示例6: archive_database
# 需要导入模块: import utils [as 别名]
# 或者: from utils import mkdir_p [as 别名]
def archive_database(cfg):
    """
    Dump the databases for cfg.lang_code into a timestamped gzip archive.

    Prunes archives older than cfg.archive_duration_days (when positive),
    ensures the per-language archive directory exists, and pipes
    mysqldump through gzip into a '%Y%m%d-%H%M.sql.gz' file.  Returns
    whatever the shell() helper returns for the dump pipeline.
    """
    dbs_to_archive = get_db_names_to_archive(cfg.lang_code)
    archive_dir = os.path.join(cfg.archive_dir, cfg.lang_code)
    if cfg.archive_duration_days > 0:
        delete_old_archives(archive_dir, cfg.archive_duration_days)
    utils.mkdir_p(archive_dir)
    stamp = datetime.datetime.now().strftime('%Y%m%d-%H%M.sql.gz')
    output = os.path.join(archive_dir, stamp)
    print('Archiving the current database', file=sys.stderr)
    # NOTE(review): the command is interpolated into a shell string (the
    # pipe requires a shell); inputs here are config-derived, but verify
    # none can contain shell metacharacters.
    cmd = ('mysqldump --defaults-file="%s" --host=%s --databases %s | '
           'gzip > %s' % (chdb.REPLICA_MY_CNF, chdb.TOOLS_LABS_CH_MYSQL_HOST,
                          ' '.join(dbs_to_archive), output))
    return shell(cmd)