本文整理匯總了Python中tarfile.open方法的典型用法代碼示例。如果您正苦於以下問題:Python tarfile.open方法的具體用法?Python tarfile.open怎麽用?Python tarfile.open使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類tarfile
的用法示例。
在下文中一共展示了tarfile.open方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: convert_image
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def convert_image(inpath, outpath, size):
    """Convert an image file using ``sips``.

    Args:
        inpath (str): Path of source file.
        outpath (str): Path to destination file.
        size (int): Width and height of destination image in pixels.

    Raises:
        RuntimeError: Raised if ``sips`` exits with non-zero status.
    """
    # Use str arguments consistently; the original mixed bytes literals
    # (b'sips') with str(size), which subprocess tolerates on POSIX but
    # is needlessly inconsistent.
    cmd = [
        'sips',
        '-z', str(size), str(size),
        inpath,
        '--out', outpath]
    # subprocess.DEVNULL discards output without manually opening
    # os.devnull (the original leaked nothing, but this is simpler).
    retcode = subprocess.call(cmd, stdout=subprocess.DEVNULL,
                              stderr=subprocess.STDOUT)
    if retcode != 0:
        raise RuntimeError('sips exited with %d' % retcode)
示例2: detect_missing_tools
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def detect_missing_tools(distro):
    """Return an error message if an external tool needed for *distro* is missing.

    Only distros whose creator function is
    ``create_persistence_using_resize2fs`` require e2fsck/resize2fs; for
    any other (or unknown) distro ``None`` is returned immediately.

    Returns:
        str | None: Human-readable message naming the missing tool, or
        ``None`` when everything required is available.
    """
    tools_dir = os.path.join('data', 'tools')
    if platform.system() == 'Windows':
        _7zip_exe = gen.resource_path(
            os.path.join(tools_dir, '7zip', '7z.exe'))
        e2fsck_exe = gen.resource_path(os.path.join(tools_dir, 'cygwin', 'e2fsck.exe'))
        resize2fs_exe = gen.resource_path(os.path.join(tools_dir, 'cygwin', 'resize2fs.exe'))
    else:
        _7zip_exe = '7z'
        e2fsck_exe = 'e2fsck'
        resize2fs_exe = 'resize2fs'
    if distro not in creator_dict or \
            creator_dict[distro][0] is not create_persistence_using_resize2fs:
        return None
    tool = None  # defensive: referenced in the except clauses below
    try:
        # BUGFIX: the original opened os.devnull without a mode
        # (read-only) yet passed it as the child's stdout/stderr, so
        # writes by the child would hit a read-only fd. Use
        # subprocess.DEVNULL, which is opened appropriately.
        for tool in [e2fsck_exe, resize2fs_exe]:
            p = subprocess.Popen([tool], stdout=subprocess.DEVNULL,
                                 stderr=subprocess.DEVNULL)
            p.communicate()
    except FileNotFoundError:  # Windows
        return "'%s.exe' is not installed or not available for use." % tool
    except OSError:  # Linux
        return "'%s' is not installed or not available for use." % tool
    return None
示例3: download_figshare
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def download_figshare(file_name, file_ext, dir_path='./', change_name=None):
    """Download a file from figshare and unpack or rename it locally.

    Args:
        file_name (str): figshare file id appended to the download URL.
        file_ext (str): extension controlling post-processing
            ('.zip' and '.tar.bz2' are extracted; anything else is kept).
        dir_path (str): destination directory.
        change_name (str | None): optional new name (or extraction
            subdirectory) for the downloaded file.
    """
    prepare_data_dir(dir_path)
    url = 'https://ndownloader.figshare.com/files/' + file_name
    wget.download(url, out=dir_path)
    file_path = os.path.join(dir_path, file_name)
    if file_ext == '.zip':
        # Context manager ensures the archive is closed even when
        # extractall() raises (the original leaked the handle on error).
        with zipfile.ZipFile(file_path, 'r') as zip_ref:
            if change_name is not None:
                dir_path = os.path.join(dir_path, change_name)
            zip_ref.extractall(dir_path)
        os.remove(file_path)
    elif file_ext == '.tar.bz2':
        with tarfile.open(file_path, 'r:bz2') as tar_ref:
            if change_name is not None:
                dir_path = os.path.join(dir_path, change_name)
            tar_ref.extractall(dir_path)
        os.remove(file_path)
    elif change_name is not None:
        os.rename(file_path, os.path.join(dir_path, change_name))
# Download QM9 dataset
示例4: __init__
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def __init__(self, default_compression="gz", logfile=None, verbose=True):
    """
    Constructor

    @param default_compression: default compression used for files
    @type default_compression: str | unicode
    @param logfile: file handler or file path to a log file
    @type logfile: file | io.FileIO | StringIO.StringIO | basestring
    @param verbose: Not verbose means that only warnings and errors will be passed to stream
    @type verbose: bool

    @return: None
    @rtype: None
    """
    # NOTE: basestring implies this module targets Python 2.
    assert logfile is None or isinstance(logfile, basestring) or self.is_stream(logfile)
    # BUGFIX: the message previously read "separator must be string",
    # a copy/paste error — the check is about default_compression.
    assert isinstance(default_compression, basestring), "default_compression must be string"
    assert isinstance(verbose, bool), "verbose must be true or false"
    # self._open is consulted before super().__init__() runs, so it is
    # presumably a class-level dict on the base class — TODO confirm.
    assert default_compression.lower() in self._open, "Unknown compression: '{}'".format(default_compression)
    super(Archive, self).__init__(label="Archive", default_compression=default_compression, logfile=logfile, verbose=verbose)
    # Register the plain-tar opener alongside the compression openers.
    self._open['tar'] = tarfile.open
    self._default_compression = default_compression
示例5: write_payload
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def write_payload(self):
    """Stream the payload tarball to the device over the serial port.

    Sends a shell command that base64-decodes and untars stdin, pushes
    the base64-encoded tar file, then sends EOT (Ctrl-D) to terminate
    the stream.
    """
    port = self._port
    tar_path = self.create_payload_tar()
    # Wait for the remote shell prompt before issuing the command.
    log.debug(port.read_until("/ # "))
    port.write("base64 -d | tar zxf -\n")
    port.flush()
    log.info("Transferring payload")
    # BUGFIX: open in binary mode — base64.encode() reads raw bytes
    # from its input file; text mode 'r' breaks under Python 3.
    with open(tar_path, 'rb') as f:
        base64.encode(f, port)
    os.remove(tar_path)
    port.flush()
    port.reset_input_buffer()
    # EOT ends the base64 stream on the remote side.
    port.write("\x04")
    port.flush()
示例6: get_caltech101_data
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def get_caltech101_data():
    """Download and extract the Caltech-101 archive if not already present.

    Returns:
        tuple(str, str): (training_path, testing_path) directories under
        the local ``data`` folder.
    """
    url = "https://s3.us-east-2.amazonaws.com/mxnet-public/101_ObjectCategories.tar.gz"
    dataset_name = "101_ObjectCategories"
    data_folder = "data"
    if not os.path.isdir(data_folder):
        os.makedirs(data_folder)
    tar_path = mx.gluon.utils.download(url, path=data_folder)
    if (not os.path.isdir(os.path.join(data_folder, "101_ObjectCategories")) or
            not os.path.isdir(os.path.join(data_folder, "101_ObjectCategories_test"))):
        # Context manager guarantees the archive is closed even if
        # extraction fails (the original leaked the handle on error).
        with tarfile.open(tar_path, "r:gz") as tar:
            tar.extractall(data_folder)
        print('Data extracted')
    training_path = os.path.join(data_folder, dataset_name)
    testing_path = os.path.join(data_folder, "{}_test".format(dataset_name))
    return training_path, testing_path
示例7: resolve
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def resolve(ctx):
    """Super-resolve ``opt.resolve_img`` with the global net; save resolved.png.

    Only the Y (luma) channel goes through the network; Cb/Cr are
    bicubically upscaled and merged back before converting to RGB.
    """
    from PIL import Image
    if isinstance(ctx, list):
        ctx = [ctx[0]]
    net.load_parameters('superres.params', ctx=ctx)
    source = Image.open(opt.resolve_img).convert('YCbCr')
    y, cb, cr = source.split()
    # Add batch and channel axes: (H, W) -> (1, 1, H, W).
    batch = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)
    upscaled = mx.nd.reshape(net(batch), shape=(-3, -2)).asnumpy()
    luma = Image.fromarray(np.uint8(upscaled.clip(0, 255)[0]), mode='L')
    chroma_b = cb.resize(luma.size, Image.BICUBIC)
    chroma_r = cr.resize(luma.size, Image.BICUBIC)
    rgb = Image.merge('YCbCr', [luma, chroma_b, chroma_r]).convert('RGB')
    rgb.save('resolved.png')
示例8: _get_data
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def _get_data(self):
    """Download and decode the gzip'd image/label files for this split.

    Populates ``self._data`` (NDArray shaped (N, 28, 28, 1), uint8) and
    ``self._label`` (int32 numpy array).
    """
    if self._train:
        data, label = self._train_data, self._train_label
    else:
        data, label = self._test_data, self._test_label
    namespace = 'gluon/dataset/' + self._namespace
    data_file = download(_get_repo_file_url(namespace, data[0]),
                         path=self._root,
                         sha1_hash=data[1])
    label_file = download(_get_repo_file_url(namespace, label[0]),
                          path=self._root,
                          sha1_hash=label[1])
    with gzip.open(label_file, 'rb') as stream:
        struct.unpack(">II", stream.read(8))  # magic + count, discarded
        label = np.frombuffer(stream.read(), dtype=np.uint8).astype(np.int32)
    with gzip.open(data_file, 'rb') as stream:
        struct.unpack(">IIII", stream.read(16))  # magic, count, rows, cols
        raw = np.frombuffer(stream.read(), dtype=np.uint8)
    self._data = nd.array(raw.reshape(len(label), 28, 28, 1), dtype=raw.dtype)
    self._label = label
示例9: test_image_detiter
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def test_image_detiter(self):
    """Smoke-test ImageDetIter over in-memory image lists and a .lst file."""
    im_list = [_generate_objects() + [img] for img in TestImage.IMAGES]
    det_iter = mx.image.ImageDetIter(2, (3, 300, 300), imglist=im_list, path_root='')
    # Iterate a few epochs to exercise reset().
    for _ in range(3):
        for _batch in det_iter:
            pass
        det_iter.reset()
    val_iter = mx.image.ImageDetIter(2, (3, 300, 300), imglist=im_list, path_root='')
    det_iter = val_iter.sync_label_shape(det_iter)
    # Exercise the file-list (path_imglist) code path as well.
    fname = './data/test_imagedetiter.lst'
    indexed = [[idx] + _generate_objects() + [img]
               for idx, img in enumerate(TestImage.IMAGES)]
    with open(fname, 'w') as fh:
        for row in indexed:
            fh.write('\t'.join(str(col) for col in row) + '\n')
    det_iter = mx.image.ImageDetIter(2, (3, 400, 400), path_imglist=fname,
                                     path_root='')
    for _batch in det_iter:
        pass
示例10: _download_and_uncompress_dataset
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def _download_and_uncompress_dataset(dataset_dir):
    """Downloads cifar10 and uncompresses it locally.

    Args:
        dataset_dir: The directory where the temporary files are stored.
    """
    filename = _DATA_URL.split('/')[-1]
    filepath = os.path.join(dataset_dir, filename)

    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # \r keeps the progress report on a single console line.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(_DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    # BUGFIX: the original `tarfile.open(...).extractall(...)` leaked
    # the TarFile handle; the context manager closes it deterministically.
    with tarfile.open(filepath, 'r:gz') as tar:
        tar.extractall(dataset_dir)
示例11: download_and_uncompress_tarball
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def download_and_uncompress_tarball(tarball_url, dataset_dir):
    """Downloads the `tarball_url` and uncompresses it locally.

    Args:
        tarball_url: The URL of a tarball file.
        dataset_dir: The directory where the temporary files are stored.
    """
    filename = tarball_url.split('/')[-1]
    filepath = os.path.join(dataset_dir, filename)

    def _progress(count, block_size, total_size):
        # \r keeps the progress report on a single console line.
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (
            filename, float(count * block_size) / float(total_size) * 100.0))
        sys.stdout.flush()

    filepath, _ = urllib.request.urlretrieve(tarball_url, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    # BUGFIX: the original `tarfile.open(...).extractall(...)` leaked
    # the TarFile handle; the context manager closes it deterministically.
    with tarfile.open(filepath, 'r:gz') as tar:
        tar.extractall(dataset_dir)
示例12: maybe_download_and_extract
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def maybe_download_and_extract():
    """Download and extract the CIFAR-10 binary tarball if not present.

    Uses ``FLAGS.data_dir`` as destination; skips both the download and
    the extraction when their results already exist.
    """
    dest_directory = FLAGS.data_dir
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # \r keeps the progress report on a single console line.
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
    if not os.path.exists(extracted_dir_path):
        # BUGFIX: the original `tarfile.open(...).extractall(...)` leaked
        # the TarFile handle; the context manager closes it.
        with tarfile.open(filepath, 'r:gz') as tar:
            tar.extractall(dest_directory)
示例13: extract_mnist_data
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def extract_mnist_data(filename, num_images, image_size, pixel_depth):
    """
    Extract the images into a 4D tensor [image index, y, x, channels].
    Values are rescaled from [0, 255] down to [-0.5, 0.5].

    The decoded float32 array is cached as ``<filename>.npy`` and reused
    on subsequent calls.
    """
    if not tf.gfile.Exists(filename + ".npy"):
        with gzip.open(filename) as bytestream:
            bytestream.read(16)  # skip the 16-byte IDX header
            buf = bytestream.read(image_size * image_size * num_images)
            data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
            # Rescale [0, 255] -> [-0.5, 0.5].
            data = (data - (pixel_depth / 2.0)) / pixel_depth
            data = data.reshape(num_images, image_size, image_size, 1)
            np.save(filename, data)
            return data
    else:
        # BUGFIX: the cache is binary (np.save); mode='r' breaks
        # np.load under Python 3 — open it in 'rb'.
        with tf.gfile.Open(filename + ".npy", mode='rb') as file_obj:
            return np.load(file_obj)
示例14: get_wmt_enfr_dev_set
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def get_wmt_enfr_dev_set(directory):
    """Download the WMT en-fr dev corpus to *directory* unless it's there.

    Returns:
        str: path prefix of the extracted dev files (append .fr / .en).
    """
    dev_name = "newstest2013"
    dev_path = os.path.join(directory, dev_name)
    if not (tf.gfile.Exists(dev_path + ".fr") and
            tf.gfile.Exists(dev_path + ".en")):
        dev_file = maybe_download(directory, "dev-v2.tgz", _WMT_ENFR_DEV_URL)
        # BUGFIX: was a Python-2 `print` statement, which is a syntax
        # error under Python 3; the function call works on both.
        print("Extracting tgz file %s" % dev_file)
        with tarfile.open(dev_file, "r:gz") as dev_tar:
            fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
            en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
            fr_dev_file.name = dev_name + ".fr"  # Extract without "dev/" prefix.
            en_dev_file.name = dev_name + ".en"
            dev_tar.extract(fr_dev_file, directory)
            dev_tar.extract(en_dev_file, directory)
    return dev_path
示例15: create
# 需要導入模塊: import tarfile [as 別名]
# 或者: from tarfile import open [as 別名]
def create(ctx):
    """
    Install a chute from the working directory.

    Builds an uncompressed tar of the current directory in a temporary
    file, POSTs it to the router's /chutes/ endpoint, then watches the
    resulting change.
    """
    url = "{}/chutes/".format(ctx.obj['base_url'])
    headers = {'Content-Type': 'application/x-tar'}
    if not os.path.exists("paradrop.yaml"):
        raise Exception("No paradrop.yaml file found in working directory.")
    with tempfile.TemporaryFile() as temp:
        # BUGFIX: close the TarFile via a context manager so the archive
        # is finalized/closed even if tar.add() raises mid-walk.
        with tarfile.open(fileobj=temp, mode="w") as tar:
            for dirName, subdirList, fileList in os.walk('.'):
                for fname in fileList:
                    path = os.path.join(dirName, fname)
                    arcname = os.path.normpath(path)
                    tar.add(path, arcname=arcname)
        temp.seek(0)
        res = router_request("POST", url, headers=headers, data=temp)
        data = res.json()
        ctx.invoke(watch, change_id=data['change_id'])