This article collects typical usage examples of Python's shutil.rmtree function. If you have been wondering what exactly shutil.rmtree does, how to call it, or what real-world uses look like, the curated function examples below should help. Fifteen rmtree code examples are shown, sorted by popularity by default.
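Before the collected examples, here is a minimal, self-contained sketch of the basic shutil.rmtree call patterns. The throwaway directory created with tempfile is purely illustrative and not taken from any of the examples below.

import os
import shutil
import tempfile

# Build a small throwaway tree to delete (illustrative only).
scratch = tempfile.mkdtemp(prefix="rmtree_demo_")
os.makedirs(os.path.join(scratch, "nested", "deeper"))

# Simplest form: remove the whole tree, raising on the first error.
shutil.rmtree(scratch)

# Tolerant form: a second call on the now-missing path would normally raise
# FileNotFoundError, but ignore_errors=True silences it.
shutil.rmtree(scratch, ignore_errors=True)

# Callback form: report failures instead of raising.
def report(func, path, exc_info):
    print("could not remove %s (%s)" % (path, func.__name__))

shutil.rmtree(scratch, onerror=report)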
Example 1: remove

def remove(self, ignore_errors=False):
    # Think about ignore_errors
    stream_logger.info(" - %s" % self.name)
    # If the archive has not been extracted yet
    if not os.path.exists("%s/%s" % (conf.get("settings", "cache"), self.name)):
        self.unarchive()
        self.import_control()
    # Pre Remove
    stream_logger.info(" | Pre Remove")
    self.control.pre_remove()
    # Remove
    stream_logger.info(" | Remove")
    files_list = open(os.path.join(conf.get("settings", "cache"), self.name, "files.lst")).readlines()
    for _file in files_list:
        try:
            os.remove(os.path.join(conf.get("settings", "packages"), _file.replace("\n", "")))
        except OSError:
            pass
    # Post Remove
    stream_logger.info(" | Post Remove")
    self.control.post_remove()
    stream_logger.info(" | Clean")
    shutil.rmtree(os.path.join(conf.get("settings", "cache"), self.name))
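The "Think about ignore_errors" note in this example hints at forwarding the method's ignore_errors argument to the final cleanup. A hedged sketch of that change, reusing the conf/cache layout assumed by the example above, might look like this:

cache_dir = os.path.join(conf.get("settings", "cache"), self.name)
# Sketch only: let callers decide whether a failed cleanup should raise.
shutil.rmtree(cache_dir, ignore_errors=ignore_errors)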
Example 2: copy_template

def copy_template():
    config_prompt(template)
    shutil.copytree(template, name)
    if os.path.exists('%s/%s' % (name, 'config.yaml')):
        os.remove('%s/%s' % (name, 'config.yaml'))
    for dirname, dirnames, files in os.walk(name):
        for d in dirnames:
            if d == options.template:
                shutil.copytree('%s/%s' % (dirname, d), '%s/%s' % (dirname, name))
                shutil.rmtree('%s/%s' % (dirname, d))
    for dirname, dirnames, files in os.walk(name):
        for filename in files:
            f = open('%s/%s' % (dirname, filename), 'r')
            lines = f.readlines()
            f.close()
            first_pass = [re.sub(r'{{\s*(\w+)\s*}}', replace_variable, line) for line in lines]
            new_lines = [re.sub(r'__config_(\w+)__', replace_variable, line) for line in first_pass]
            f = open('%s/%s' % (dirname, filename), 'w')
            f.write(''.join(new_lines))
            f.close()
Example 3: remove

def remove(path):
    if os.path.exists(path):
        print(' removing: ', path)
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.unlink(path)
Example 4: test_compare_values_border_cases

def test_compare_values_border_cases(self):
    """tests the condition where BSR values are near the border regions
    differentiated by the function"""
    tdir = tempfile.mkdtemp(prefix="filetest_")
    fpath = os.path.join(tdir, "group1_pruned")
    fp = open(fpath, "w")
    fp.write(" E2348_69_all\n")
    fp.write("IpaH3 0.03\n")
    fp.write("LT 0.00\n")
    fp.write("ST2 0.00\n")
    fp.write("bfpB 0.81\n")
    fp.write("stx2a 0.07")
    fp.close()
    npath = os.path.join(tdir, "group2_pruned")
    np = open(npath, "w")
    np.write(" H10407_all\n")
    np.write("IpaH3 0.03\n")
    np.write("LT 0.80\n")
    np.write("ST2 1.00\n")
    np.write("bfpB 0.00\n")
    np.write("stx2a 0.79")
    np.close()
    self.assertEqual(compare_values(fpath, npath, "0.8", "0.4"),
                     ([0.81], [0.80, 1.00], [0.03, 0.0, 0.0, 0.81, 0.07]))
    shutil.rmtree(tdir)
    os.system("rm group*_out.txt")
Example 5: test_run_multiproc_nondaemon_with_flag

def test_run_multiproc_nondaemon_with_flag(nondaemon_flag):
    '''
    Start a pipe with two nodes using the multiproc plugin and passing the nondaemon_flag.
    '''
    cur_dir = os.getcwd()
    temp_dir = mkdtemp(prefix='test_engine_')
    os.chdir(temp_dir)
    pipe = pe.Workflow(name='pipe')
    f1 = pe.Node(interface=Function(function=TestInterface.testFunction,
                                    input_names=['sum'],
                                    output_names=['sum_out']),
                 name='f1')
    f2 = pe.Node(interface=Function(function=TestInterface.testFunction,
                                    input_names=['sum'],
                                    output_names=['sum_out']),
                 name='f2')
    pipe.connect([(f1, f2, [('sum_out', 'sum')])])
    pipe.base_dir = os.getcwd()
    f1.inputs.sum = 0
    # Execute the pipe using the MultiProc plugin with 2 processes and the non_daemon flag
    # to enable child processes which start other multiprocessing jobs.
    execgraph = pipe.run(plugin="MultiProc",
                         plugin_args={'n_procs': 2, 'non_daemon': nondaemon_flag})
    names = ['.'.join((node._hierarchy, node.name)) for node in execgraph.nodes()]
    node = execgraph.nodes()[names.index('pipe.f2')]
    result = node.get_output('sum_out')
    yield assert_equal, result, 180  # n_procs (2) * numberOfThreads (2) * 45 == 180
    os.chdir(cur_dir)
    rmtree(temp_dir)
Example 6: build_dist

def build_dist(self):
    for sdir in self.staging_dirs:
        if os.path.exists(sdir):
            shutil.rmtree(sdir)
    main_stage, ninja_stage = self.staging_dirs
    modules = [os.path.splitext(os.path.split(x)[1])[0] for x in glob(os.path.join('mesonbuild/modules/*'))]
    modules = ['mesonbuild.modules.' + x for x in modules if not x.startswith('_')]
    modules += ['distutils.version']
    modulestr = ','.join(modules)
    python = shutil.which('python')
    cxfreeze = os.path.join(os.path.dirname(python), "Scripts", "cxfreeze")
    if not os.path.isfile(cxfreeze):
        print("ERROR: This script requires cx_freeze module")
        sys.exit(1)
    subprocess.check_call([python,
                           cxfreeze,
                           '--target-dir',
                           main_stage,
                           '--include-modules',
                           modulestr,
                           'meson.py'])
    if not os.path.exists(os.path.join(main_stage, 'meson.exe')):
        sys.exit('Meson exe missing from staging dir.')
    os.mkdir(ninja_stage)
    shutil.copy(shutil.which('ninja'), ninja_stage)
    if not os.path.exists(os.path.join(ninja_stage, 'ninja.exe')):
        sys.exit('Ninja exe missing from staging dir.')
Example 7: do_deploy

def do_deploy():
    def _run_ansible():
        subprocess.check_call(['ansible-playbook', 'masters.yml'],
                              cwd='result/{}'.format(grid_name))

    parent_deployment._status = 'masters_provision_deploying'
    provision_deployment._status = 'masters_deploying'
    provision_deployment.save()
    parent_deployment.save()
    provision_generator = provision_generators[grid.type][
        grid.provider](grid_name, **kwargs)
    provision_generator.generate_all(
        grid_name, infrastructure_deployment._accessip)
    try:
        _run_ansible()
    except Exception:
        provision_deployment._status = 'masters_deploy_failed'
        parent_deployment._status = 'masters_provision_deploy_failed'
    else:
        provision_deployment._status = 'masters_deployed'
        parent_deployment._status = 'masters_provision_finished'
    finally:
        provision_deployment.save()
        self.unlock(parent_deployment)
        parent_deployment.save()
    os.chdir(cwd)
    try:
        del os.environ['AWS_ACCESS_KEY_ID']
    except KeyError:
        print('no such env')
    try:
        del os.environ['AWS_SECRET_ACCESS_KEY']
    except KeyError:
        print('no such env')
    shutil.rmtree('result/{}'.format(grid_name))
Example 8: test_sequence_output

def test_sequence_output():
    directory = tempfile.mkdtemp()
    assert 0 == len(os.listdir(directory))
    cli.main(['--seq', '-v=1', '-e=m', '-o=' + os.path.join(directory, 'test.svg'),
              'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
    number_of_files = len(os.listdir(directory))
    shutil.rmtree(directory)
    assert 4 == number_of_files
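Several of these tests pair tempfile.mkdtemp() with a closing shutil.rmtree(). On Python 3, tempfile.TemporaryDirectory wraps the same cleanup in a context manager; the sketch below only illustrates that pattern and is not a rewrite of any test above.

import os
import tempfile

# The directory is removed automatically when the with-block exits,
# even if an assertion in the middle raises.
with tempfile.TemporaryDirectory() as directory:
    assert 0 == len(os.listdir(directory))
    # ... produce output files into `directory` here ...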
Example 9: tearDown

def tearDown(self):
    try:
        rmtree(self.dir)
    except OSError as e:
        # Keep going if the directory is already gone or cannot be accessed
        if e.errno not in (errno.ENOENT, errno.EACCES, errno.EPERM):
            raise e
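A blunter alternative to the errno filtering above is shutil.rmtree's own ignore_errors flag, which swallows every failure rather than only the listed errnos. The sketch below assumes the same self.dir attribute and trades precision for brevity.

import shutil

def tearDown(self):
    # Sketch only: ignore_errors=True hides *all* problems, including ones
    # the errno check above would deliberately re-raise.
    shutil.rmtree(self.dir, ignore_errors=True)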
Example 10: _download_artifacts_from_url

def _download_artifacts_from_url(url, appstack):
    """
    Does the necessary things to download application artifacts.
    Args:
        url: address of artifacts server, i.e. http://artifacts.com/download/{name}
            where the 'name' param is dynamically replaced with the app name
        appstack: path to appstack file
    """
    if os.path.exists(DEFAULT_ARTIFACTS_PATH):
        shutil.rmtree(DEFAULT_ARTIFACTS_PATH, ignore_errors=True)
    os.makedirs(DEFAULT_ARTIFACTS_PATH)
    with open(appstack) as appstack_file:
        appstack_dict = yaml.load(appstack_file)
    artifacts_names = set()
    for app in appstack_dict['apps']:
        artifacts_names.add(app.get('artifact_name', app.get('name')))
    for artifact_name in artifacts_names:
        artifact_url = url.format(name=artifact_name)
        _log.info('Downloading artifact for %s app from %s', artifact_name, artifact_url)
        proc = subprocess.Popen(['wget', '--no-check-certificate', '--content-disposition',
                                 '-nv', artifact_url], cwd=DEFAULT_ARTIFACTS_PATH)
        if proc.wait() != 0:
            _log.error('Error during download! wget output:\n%s', proc.stdout.read())
Example 11: remove_or_ignore_dir

def remove_or_ignore_dir(path):
    """Remove a directory if it exists."""
    try:
        rmtree(path)
    except OSError as e:
        if e.errno != ENOENT:
            raise
Example 12: test_build_debian

def test_build_debian(self):
    from . import pkgbuild
    tmpdir = tempfile.mkdtemp()
    try:
        source = PackageSource.objects.get(id=1)
        br = BuildRecord(source=source, build_counter=10,
                         sha='e65b55054c5220321c56bb3dfa96fbe5199f329c')
        br.save()
        basedir = os.path.join(tmpdir, 'd')
        shutil.copytree(os.path.join(os.path.dirname(__file__), 'test_data', 'debian'), basedir)
        orig_stdout = sys.stdout
        sys.stdout = StringIO()
        try:
            pkgbuild.main(['--basedir', basedir, 'version', self.live_server_url + br.get_absolute_url()])
            self.assertEquals(sys.stdout.getvalue(), '0.1+10')
            sys.stdout = StringIO()
            pkgbuild.main(['--basedir', basedir, 'name', self.live_server_url + br.get_absolute_url()])
            self.assertEquals(sys.stdout.getvalue(), 'buildsvctest')
            pkgbuild.main(['--basedir', basedir, 'build', self.live_server_url + br.get_absolute_url()])
        finally:
            sys.stdout = orig_stdout
        self.assertTrue(os.path.exists(os.path.join(basedir, 'buildsvctest_0.1+10_source.changes')))
        self.assertTrue(os.path.exists(os.path.join(basedir, 'buildsvctest_0.1+10_amd64.changes')))
    finally:
        shutil.rmtree(tmpdir)
Example 13: test_upload_with_progress

def test_upload_with_progress(self):
    def callback(path, nbytes, history=defaultdict(list)):
        history[path].append(nbytes)
        return history

    dpath = mkdtemp()
    try:
        path1 = osp.join(dpath, 'foo')
        with open(path1, 'w') as writer:
            writer.write('hello!')
        os.mkdir(osp.join(dpath, 'bar'))
        path2 = osp.join(dpath, 'bar', 'baz')
        with open(path2, 'w') as writer:
            writer.write('the world!')
        self.client.upload(
            'up',
            dpath,
            chunk_size=4,
            n_threads=1,  # Callback isn't thread-safe.
            progress=callback
        )
        eq_(self._read('up/foo'), b'hello!')
        eq_(self._read('up/bar/baz'), b'the world!')
        eq_(
            callback('', 0),
            {path1: [4, 6, -1], path2: [4, 8, 10, -1], '': [0]}
        )
    finally:
        rmtree(dpath)
Example 14: test_upload_cleanup

def test_upload_cleanup(self):
    dpath = mkdtemp()
    _write = self.client.write

    def write(hdfs_path, *args, **kwargs):
        if 'bar' in hdfs_path:
            raise RuntimeError()
        return _write(hdfs_path, *args, **kwargs)

    try:
        self.client.write = write
        npath = osp.join(dpath, 'hi')
        os.mkdir(npath)
        with open(osp.join(npath, 'foo'), 'w') as writer:
            writer.write('hello!')
        os.mkdir(osp.join(npath, 'bar'))
        with open(osp.join(npath, 'bar', 'baz'), 'w') as writer:
            writer.write('world!')
        try:
            self.client.upload('foo', dpath)
        except RuntimeError:
            ok_(not self._exists('foo'))
        else:
            ok_(False)  # This shouldn't happen.
    finally:
        rmtree(dpath)
        self.client.write = _write
Example 15: cleanupFiles

def cleanupFiles():
    # First get rid of modified files
    for l in ["l1", "l2", "l3"]:
        arcpy.Delete_management(l)
    for f in glob.glob("C:\\Arctmp\\*"):
        try:
            shutil.rmtree(f)
        except OSError:
            print("UNABLE TO REMOVE:", f)
    # Now pick a fresh workspace directory that does not exist yet
    for i in range(0, 1000000):
        new_workspace = "C:\\Arctmp\\workspace." + str(i)
        if not os.path.exists(new_workspace):
            break
    print("TESTING USING WORKSPACE", new_workspace)
    # Now move in fresh copies
    shutil.copytree("C:\\Arcbase", new_workspace)
    print("CONTENTS:")
    arcpy.env.workspace = new_workspace
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.shp")):
        print(f)
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.lyr")):
        print(f)
    for f in sorted(glob.glob(arcpy.env.workspace + "\\*.gdb")):
        print(f)