This article collects typical usage examples of the Python method shutil.unpack_archive. If you are wondering what exactly shutil.unpack_archive does, how to call it, and what real-world uses look like, the curated code samples below may help. You can also explore further usage examples from the shutil module, to which this method belongs.
The following shows 15 code examples of shutil.unpack_archive, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
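Before the examples, here is a minimal sketch of the basic call (the file and directory names are placeholders): shutil.unpack_archive infers the archive format from the filename extension unless a format is given explicitly, and extracts into extract_dir (the current working directory if omitted).

import shutil

# Format is inferred from the ".tar.gz" extension; "project.tar.gz" and "unpacked" are placeholders.
shutil.unpack_archive("project.tar.gz", extract_dir="unpacked")

# An explicit format helps when the file name carries no recognizable extension.
shutil.unpack_archive("download.bin", extract_dir="unpacked", format="zip")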
Example 1: train
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def train(self, save_data, opt):
    save_data_archive = save_temp_bin(save_data)
    save_data_dir = temp_dir()
    shutil.unpack_archive(filename=save_data_archive, extract_dir=save_data_dir, format="gztar")
    save_model = temp_dir()
    opt["data"] = save_data_dir + "data"
    opt["save_model"] = save_model
    if is_cuda:
        opt["world_size"] = 1
        opt["gpu_ranks"] = 0
    run_param('train.py', opt)
    return save_model
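Example 1 unpacks a "gztar" archive produced by the project-specific helper save_temp_bin (not shown here). As a rough counterpart, and only as an assumed illustration, the standard-library way to produce such an archive is shutil.make_archive; the directory names below are placeholders.

import shutil

# make_archive returns the full path of the archive it created ("save_data.tar.gz" here).
archive_path = shutil.make_archive("save_data", "gztar", root_dir="preprocessed_data")
shutil.unpack_archive(archive_path, extract_dir="restored_data", format="gztar")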
Example 2: get_mathlib_olean
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def get_mathlib_olean(self) -> None:
    """Get precompiled mathlib oleans for this project."""
    # Just in case the user broke the workflow (for instance git clone
    # mathlib by hand and then run `leanproject get-cache`)
    if not (self.directory/'leanpkg.path').exists():
        self.run(['leanpkg', 'configure'])
    try:
        archive = get_mathlib_archive(self.mathlib_rev, self.cache_url,
                                      self.force_download, self.repo)
    except (EOFError, shutil.ReadError):
        log.info('Something wrong happened with the olean archive. '
                 'I will now retry downloading.')
        archive = get_mathlib_archive(self.mathlib_rev, self.cache_url,
                                      True, self.repo)
    self.clean_mathlib()
    self.mathlib_folder.mkdir(parents=True, exist_ok=True)
    unpack_archive(archive, self.mathlib_folder)
    # Let's now touch oleans, just in case
    touch_oleans(self.mathlib_folder)
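Example 2 retries the download when unpacking fails. For reference, a minimal sketch of the error handling involved: shutil.unpack_archive raises shutil.ReadError when the archive cannot be read or its format cannot be determined from the extension (the paths below are placeholders).

import shutil

try:
    shutil.unpack_archive("olean_cache.tar.xz", "build_cache")  # placeholder paths
except shutil.ReadError:
    # The archive is unreadable or its format was not recognized;
    # a caller could re-download it and retry, as the example above does.
    raise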
Example 3: currentdir
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def currentdir(self, directory: Optional[str]) -> None:
    if directory is None:
        self._currentdir = None
    else:
        dirpath = os.path.join(self.basepath, directory)
        zippath = f'{dirpath}.zip'
        if os.path.exists(zippath):
            shutil.unpack_archive(
                filename=zippath,
                extract_dir=os.path.join(self.basepath, directory),
                format='zip',
            )
            os.remove(zippath)
        elif not os.path.exists(dirpath):
            os.makedirs(dirpath)
        self._currentdir = str(directory)
Example 4: untar
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def untar(path, fname, deleteTar=True):
    """
    Unpack the given archive file to the same directory.

    :param str path:
        The folder containing the archive. Will contain the contents.
    :param str fname:
        The filename of the archive file.
    :param bool deleteTar:
        If true, the archive will be deleted after extraction.
    """
    logging.debug(f'unpacking {fname}')
    fullpath = os.path.join(path, fname)
    shutil.unpack_archive(fullpath, path)
    if deleteTar:
        os.remove(fullpath)
Example 5: _restore_from_backup
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def _restore_from_backup(self, src_ver: str):
    logger.info('Restoring from backup for {}'.format(src_ver))
    for file_path in self.files_to_preserve:
        try:
            shutil.copy2(os.path.join(self.backup_target, file_path),
                         os.path.join(self.tmp_dir, file_path))
        except IOError as e:
            logger.warning('Copying {} failed due to {}'
                           .format(file_path, e))
    shutil.unpack_archive(self._backup_name_ext(
        src_ver), self.backup_target, self.backup_format)
    for file_path in self.files_to_preserve:
        try:
            shutil.copy2(os.path.join(self.tmp_dir, file_path),
                         os.path.join(self.backup_target, file_path))
        except IOError as e:
            logger.warning('Copying {} failed due to {}'
                           .format(file_path, e))
    shutil.rmtree(self.tmp_dir, ignore_errors=True)
Example 6: test_cached_download
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
async def test_cached_download(self, ts=httptest.NoServer()):
    with tempfile.TemporaryDirectory() as tempdir:

        @cached_download(
            ts.url() + "/archive.tar.gz",
            pathlib.Path(tempdir) / "archive.tar.gz",
            ARCHIVE_HASH,
            protocol_allowlist=["http://"],
        )
        async def func(filename):
            return filename

        # Directory to extract to
        extracted = pathlib.Path(tempdir, "extracted")
        # Unpack the archive
        shutil.unpack_archive(await func(), extracted)
        self.verify_extracted_contents(extracted)
Example 7: untar
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def untar(path, fname, deleteTar=True):
    """
    Unpack the given archive file to the same directory.

    :param str path:
        The folder containing the archive. Will contain the contents.
    :param str fname:
        The filename of the archive file.
    :param bool deleteTar:
        If true, the archive will be deleted after extraction.
    """
    print('unpacking ' + fname)
    fullpath = os.path.join(path, fname)
    shutil.unpack_archive(fullpath, path)
    if deleteTar:
        os.remove(fullpath)
Example 8: test_register_unpack_archive
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def test_register_unpack_archive(tmp_path):
    shutil.register_unpack_format('7zip', ['.7z'], unpack_7zarchive)
    shutil.unpack_archive(str(testdata_path.joinpath('test_1.7z')), str(tmp_path))
    target = tmp_path.joinpath("setup.cfg")
    expected_mode = 33188
    expected_mtime = 1552522033
    if os.name == 'posix':
        assert target.stat().st_mode == expected_mode
        assert target.stat().st_mtime == expected_mtime
    m = hashlib.sha256()
    m.update(target.open('rb').read())
    assert m.digest() == binascii.unhexlify('ff77878e070c4ba52732b0c847b5a055a7c454731939c3217db4a7fb4a1e7240')
    m = hashlib.sha256()
    m.update(tmp_path.joinpath('setup.py').open('rb').read())
    assert m.digest() == binascii.unhexlify('b916eed2a4ee4e48c51a2b51d07d450de0be4dbb83d20e67f6fd166ff7921e49')
    m = hashlib.sha256()
    m.update(tmp_path.joinpath('scripts/py7zr').open('rb').read())
    assert m.digest() == binascii.unhexlify('b0385e71d6a07eb692f5fb9798e9d33aaf87be7dfff936fd2473eab2a593d4fd')
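Example 8 registers py7zr's unpack_7zarchive helper so that unpack_archive can handle .7z files. Below is a small sketch of the same idea with a guard against registering the handler twice, assuming py7zr is installed; shutil.get_unpack_formats() lists the currently registered (name, extensions, description) tuples, and the paths are placeholders.

import shutil
from py7zr import unpack_7zarchive  # assumes py7zr is installed, as in the test above

# Register the 7z handler only if it has not been registered yet.
if '7zip' not in (name for name, extensions, description in shutil.get_unpack_formats()):
    shutil.register_unpack_format('7zip', ['.7z'], unpack_7zarchive)

shutil.unpack_archive('bundle.7z', 'output_dir')  # placeholder paths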
Example 9: _download_and_extract_model_tar_gz
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def _download_and_extract_model_tar_gz(self, model_id):
    """
    This function first gets the s3 location from dynamo db,
    downloads the model, extracts it and then
    returns a tuple (str, str) of metadata string and model weights URL on disk
    """
    deployable_model_id_record = self.model_ddb_wrapper.get_model_record(experiment_id=self.experiment_id,
                                                                         model_id=model_id)
    s3_uri = deployable_model_id_record.get("s3_model_output_path", "")
    if s3_uri:
        try:
            tmp_dir = Path(f"/opt/ml/downloads/{gen_random_string()}")
            tmp_dir.mkdir(parents=True, exist_ok=True)
            tmp_model_tar_gz = os.path.join(tmp_dir.as_posix(), "model.tar.gz")
            bucket, key = parse_s3_url(s3_uri)
            self.s3_resource.Bucket(bucket).download_file(key, tmp_model_tar_gz)
            shutil.unpack_archive(filename=tmp_model_tar_gz, extract_dir=tmp_dir.as_posix())
            return self.get_model(tmp_dir.as_posix())
        except Exception as e:
            logger.exception(f"Could not parse or download {model_id} from {s3_uri} due to {e}")
            return None
    else:
        logger.exception(f"Could not find the s3 location of {model_id}")
        return None
Example 10: download
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def download(_, target, droot, __):
    url = target["url"]
    fname = target.get("target", url.split("/")[-1])
    r = requests.get(
        url,
        stream=True,
        headers={
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Safari/605.1.15"
        },
    )
    chars = "-\\|/"
    with open(f"{droot}/{fname}", "wb") as f:
        for i, chunk in enumerate(r.iter_content(chunk_size=1024)):
            arrow(f"Downloading... {chars[i%len(chars)]}", end="\r")
            if chunk:
                f.write(chunk)
    if fname.endswith(".zip") or fname.endswith(".tar.gz"):
        arrow(f"Unpacking {fname}...")
        shutil.unpack_archive(f"{droot}/{fname}", droot)
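Example 10 checks only for .zip and .tar.gz before unpacking. One possible generalization, sketched below with placeholder values, is to test the filename against every extension that shutil currently knows how to unpack.

import shutil

fname = "dataset.tar.gz"   # placeholder, stands in for the downloaded file name
droot = "downloads"        # placeholder download directory

# get_unpack_formats() yields (name, extensions, description) for each registered format.
known_extensions = {ext for _, extensions, _ in shutil.get_unpack_formats() for ext in extensions}
if any(fname.endswith(ext) for ext in known_extensions):
    shutil.unpack_archive(f"{droot}/{fname}", droot)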
Example 11: untar
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def untar(path, fname, deleteTar=True):
    """
    Unpacks the given archive file to the same directory, then (by default)
    deletes the archive file.
    """
    print('unpacking ' + fname)
    fullpath = os.path.join(path, fname)
    shutil.unpack_archive(fullpath, path)
    if deleteTar:
        os.remove(fullpath)
Example 12: update_pdfjs
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def update_pdfjs(target_version=None):
    """Download and extract the latest pdf.js version.

    If target_version is not None, download the given version instead.

    Args:
        target_version: None or version string ('x.y.z')
    """
    if target_version is None:
        version, url = get_latest_pdfjs_url()
    else:
        # We need target_version as x.y.z, without the 'v' prefix, though the
        # user might give it on the command line
        if target_version.startswith('v'):
            target_version = target_version[1:]
        # version should have the prefix to be consistent with the return value
        # of get_latest_pdfjs_url()
        version = 'v' + target_version
        url = ('https://github.com/mozilla/pdf.js/releases/download/'
               'v{0}/pdfjs-{0}-dist.zip').format(target_version)
    os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          '..', '..'))
    target_path = os.path.join('qutebrowser', '3rdparty', 'pdfjs')
    print("=> Downloading pdf.js {}".format(version))
    try:
        (archive_path, _headers) = urllib.request.urlretrieve(url)
    except urllib.error.HTTPError as error:
        print("Could not retrieve pdfjs {}: {}".format(version, error))
        return
    if os.path.isdir(target_path):
        print("Removing old version in {}".format(target_path))
        shutil.rmtree(target_path)
    os.makedirs(target_path)
    print("Extracting new version")
    shutil.unpack_archive(archive_path, target_path, 'zip')
    urllib.request.urlcleanup()
Example 13: _extract_modules
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def _extract_modules(self, dep, archive_path: Path, output_path: Path) -> bool:
    # tell shutil that a wheel can be unpacked like a zip archive
    if 'wheel' not in shutil._UNPACK_FORMATS:  # type: ignore
        shutil.register_unpack_format(
            name='wheel',
            extensions=['.whl'],
            function=shutil._unpack_zipfile,  # type: ignore
        )
    with TemporaryDirectory(suffix=dep.name) as package_path:  # type: Path # type: ignore
        package_path = Path(package_path)
        shutil.unpack_archive(str(archive_path), str(package_path))
        if len(list(package_path.iterdir())) == 1:
            package_path = next(package_path.iterdir())
        # find modules
        root = PackageRoot(name=dep.name, path=package_path)
        if not root.packages:
            self.logger.error('cannot find modules', extra=dict(
                dependency=dep.name,
                version=dep.group.best_release.version,
            ))
            return False
        # copy modules
        module_path = root.packages[0].path
        module_name = root.packages[0].module
        self.logger.info('copying module...', extra=dict(
            path=str(module_path.relative_to(package_path)),
            dependency=dep.name,
        ))
        shutil.copytree(
            src=str(module_path),
            dst=str(output_path.joinpath(*module_name.split('.'))),
        )
        return True
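Example 13 relies on shutil's private internals (_UNPACK_FORMATS and _unpack_zipfile) to treat wheels as zip archives. A possible alternative that sticks to public APIs is to register a small zipfile-based callback instead; the sketch below is illustrative only, and the function name _unpack_wheel is made up.

import shutil
import zipfile

def _unpack_wheel(archive_path, extract_dir, *args, **kwargs):
    # A wheel is a plain zip file, so zipfile can extract it without touching shutil internals.
    with zipfile.ZipFile(archive_path) as wheel:
        wheel.extractall(extract_dir)

registered_extensions = {ext for _, extensions, _ in shutil.get_unpack_formats() for ext in extensions}
if '.whl' not in registered_extensions:
    shutil.register_unpack_format('wheel', ['.whl'], _unpack_wheel)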
Example 14: load
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def load(filename: str):
    """
    Load model from NodeEmbedding model zip file.

    filename : str
        full filename of file to load (including extensions)
        The file should be the result of a `save()` call

    Loading checks for metadata and raises warnings if pkg versions
    are different than they were when saving the model.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        shutil.unpack_archive(filename, temp_dir, 'zip')
        model = joblib.load(os.path.join(temp_dir, BaseNodeEmbedder.f_model))
        with open(os.path.join(temp_dir, BaseNodeEmbedder.f_mdata)) as f:
            meta_data = json.load(f)
    # Validate the metadata
    sysverinfo = sys.version_info
    pyver = "{0}.{1}".format(sysverinfo[0], sysverinfo[1])
    if meta_data["python_"] != pyver:
        raise UserWarning(
            "Invalid python version; {0}, required: {1}".format(
                pyver, meta_data["python_"]))
    sklver = sklearn.__version__[:-2]
    if meta_data["skl_"] != sklver:
        raise UserWarning(
            "Invalid sklearn version; {0}, required: {1}".format(
                sklver, meta_data["skl_"]))
    pdver = pd.__version__[:-2]
    if meta_data["pd_"] != pdver:
        raise UserWarning(
            "Invalid pandas version; {0}, required: {1}".format(
                pdver, meta_data["pd_"]))
    csrv = cg.__version__[:-2]
    if meta_data["csrg_"] != csrv:
        raise UserWarning(
            "Invalid csrgraph version; {0}, required: {1}".format(
                csrv, meta_data["csrg_"]))
    return model
Example 15: extract
# Required imports: import shutil [as alias]
# Or: from shutil import unpack_archive [as alias]
def extract(cls, download_path, extract_dir):
    extracted_files = [
        join(extract_dir, f) for f in cls.files]
    if any(not exists(p) for p in extracted_files):
        shutil.unpack_archive(
            download_path, extract_dir)
    return extracted_files