This article collects typical usage examples of the zipfile.ZipFile class from Python's standard library. If you are unsure what ZipFile does or how to use it, the curated examples below may help.
The 15 code examples of the ZipFile class shown below are sorted by popularity by default.
Example 1: zip_layer_folder
def zip_layer_folder(dir_path, layer_name):
    """
    Create a zip archive with the content of the folder located at `dir_path`
    and name it with `layer_name`.

    Parameters
    ----------
    dir_path: str
        The path to the temporary folder in which are located the files to
        be zipped.
    layer_name: str
        The name of the concerned layer (will be used as file name for the
        zip archive).

    Returns
    -------
    raw_content: str
        The zip archive.
    archive_name: str
        The name of the archive (used later in the header of the response).
    """
    filenames = os.listdir(dir_path)
    zip_stream = BytesIO()
    myZip = ZipFile(zip_stream, "w", compression=ZIP_DEFLATED)
    for filename in filenames:
        if not filename.endswith('.geojson'):
            f_name = path_join(dir_path, filename)
            myZip.write(f_name, filename, ZIP_DEFLATED)
    myZip.close()
    zip_stream.seek(0)
    return zip_stream.read(), ''.join([layer_name, ".zip"])
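For reference, the same in-memory pattern can be written with a context manager in modern Python, and BytesIO.getvalue() avoids the explicit seek(0)/read() pair. A minimal self-contained sketch; the folder path and the skip_suffix filter are placeholders standing in for the example's .geojson exclusion:

import os
from io import BytesIO
from zipfile import ZipFile, ZIP_DEFLATED

def zip_folder_to_bytes(dir_path, skip_suffix='.geojson'):
    stream = BytesIO()
    with ZipFile(stream, 'w', compression=ZIP_DEFLATED) as zf:
        for filename in os.listdir(dir_path):
            if not filename.endswith(skip_suffix):
                # arcname=filename stores the file at the archive root
                zf.write(os.path.join(dir_path, filename), arcname=filename)
    return stream.getvalue()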
Example 2: layer_type
def layer_type(filename):
    """Finds out if a filename is a Feature or a Vector
    returns a gsconfig resource_type string
    that can be either 'featureType' or 'coverage'
    """
    base_name, extension = os.path.splitext(filename)
    shp_exts = ['.shp']
    cov_exts = ['.tif', '.tiff', '.geotiff', '.geotif']
    csv_exts = ['.csv']
    kml_exts = ['.kml']

    if extension.lower() == '.zip':
        zf = ZipFile(filename)
        # ZipFile doesn't support the with statement in Python 2.6,
        # so close it explicitly in a finally block.
        try:
            for n in zf.namelist():
                b, e = os.path.splitext(n.lower())
                if e in shp_exts or e in cov_exts or e in csv_exts:
                    base_name, extension = b, e
        finally:
            zf.close()

    if extension.lower() in shp_exts + csv_exts + kml_exts:
        return FeatureType.resource_type
    elif extension.lower() in cov_exts:
        return Coverage.resource_type
    else:
        msg = 'Saving of extension [%s] is not implemented' % extension
        raise GeoNodeException(msg)
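The core inspection idiom, shown on its own in Python 3 where ZipFile does support the with statement directly ('archive.zip' is a placeholder path):

import os
from zipfile import ZipFile

with ZipFile('archive.zip') as zf:
    # collect the distinct extensions of the archive members
    extensions = {os.path.splitext(name.lower())[1] for name in zf.namelist()}
print(extensions)  # e.g. {'.shp', '.dbf', '.shx', '.prj'}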
Example 3: import_view
def import_view(request):
    """
    Gets the existing declared parsers for the current project.
    This view handles only the file based import parsers.
    """
    choices = []
    choices_url = []
    render_dict = {}

    choices, choices_url, classes = discover_available_parsers()

    form = ImportDatasetFormWithFile(choices, prefix="with-file")
    form_without_file = ImportDatasetForm(choices_url, prefix="without-file")

    if request.method == 'POST':
        if 'upload-file' in request.POST:
            form = ImportDatasetFormWithFile(
                choices, request.POST, request.FILES, prefix="with-file")

            if form.is_valid():
                uploaded = request.FILES['with-file-zipfile']
                destination_dir, destination_file = create_tmp_destination(
                    uploaded.name)

                # The archive is binary data, so the temporary file must be
                # opened in binary mode ('w+b', not 'w+').
                with open(destination_file, 'w+b') as f:
                    f.write(uploaded.file.read())
                    zfile = ZipFile(f)
                    for name in zfile.namelist():
                        zfile.extract(
                            name, os.path.dirname(os.path.realpath(f.name)))
                        if name.endswith('shp'):
                            parser = classes[int(form['parser'].value())]
                            import_datas.delay(
                                '/'.join((destination_dir, name)),
                                parser.__name__, parser.__module__
                            )

        if 'import-web' in request.POST:
            form_without_file = ImportDatasetForm(
                choices_url, request.POST, prefix="without-file")

            if form_without_file.is_valid():
                parser = classes[int(form_without_file['parser'].value())]
                import_datas_from_web.delay(
                    parser.__name__, parser.__module__
                )

    # Hide second form if parser has no web based imports.
    render_dict['form'] = form
    if choices_url:
        render_dict['form_without_file'] = form_without_file
    return render(request, 'common/import_dataset.html', render_dict)
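The ZipFile steps buried in this view — open the saved upload, walk namelist(), extract each member next to the archive — can be sketched independently of Django. extract() returns the normalized path it created, which saves rebuilding it by hand; zip_path and dest_dir are placeholders:

import os
from zipfile import ZipFile

def extract_shapefiles(zip_path, dest_dir):
    """Extract every member and return the paths of the .shp files."""
    shp_paths = []
    with ZipFile(zip_path) as zf:
        for name in zf.namelist():
            extracted = zf.extract(name, dest_dir)  # returns the created path
            if name.endswith('.shp'):
                shp_paths.append(extracted)
    return shp_paths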
Example 4: get_results
def get_results(self):
    """Get analysis results.
    @return: data.
    """
    root = self._get_root(container="cuckoo", create=False)

    if not os.path.exists(root):
        return False

    zip_data = StringIO()
    zip_file = ZipFile(zip_data, "w", ZIP_DEFLATED)

    root_len = len(os.path.abspath(root))
    for root, dirs, files in os.walk(root):
        archive_root = os.path.abspath(root)[root_len:]
        for name in files:
            path = os.path.join(root, name)
            archive_name = os.path.join(archive_root, name)
            zip_file.write(path, archive_name, ZIP_DEFLATED)

    zip_file.close()
    data = xmlrpclib.Binary(zip_data.getvalue())
    zip_data.close()

    return data
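The abspath slicing used above to compute archive names can be expressed more directly with os.path.relpath. A minimal Python 3 sketch of the same tree-to-in-memory-archive pattern (zip_tree_to_bytes is an illustrative name, not from the example):

import os
from io import BytesIO
from zipfile import ZipFile, ZIP_DEFLATED

def zip_tree_to_bytes(root):
    stream = BytesIO()
    with ZipFile(stream, 'w', ZIP_DEFLATED) as zf:
        for dirpath, dirnames, filenames in os.walk(root):
            for name in filenames:
                path = os.path.join(dirpath, name)
                # store members under paths relative to the tree root
                zf.write(path, os.path.relpath(path, root))
    return stream.getvalue()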
Example 5: save_pickle_in_cfile
def save_pickle_in_cfile(self, local_fname, networkref):
    """ Creates a pickled version of the graph and stores it in the
    cfile

    Parameters
    ----------
    local_fname: string
        The filename used in the Pickle folder to store
    networkref: NetworkX Graph instance
        The NetworkX graph to pickle
    """
    logger.info('Write a generated graph pickle to the connectome file.')
    picklefilepath = os.path.join(tempfile.gettempdir(), local_fname)

    from networkx import write_gpickle
    # add nodekeys, edgekeys, graphid to helper node 'n0' before storage
    helperdict = {'nodekeys': networkref.nodekeys.copy(),
                  'edgekeys': networkref.edgekeys.copy(),
                  'graphid': networkref.networkid}
    networkref.graph.add_node('n0')
    networkref.graph.node['n0'] = helperdict
    write_gpickle(networkref.graph, picklefilepath)
    networkref.graph.remove_node('n0')

    from zipfile import ZipFile, ZIP_DEFLATED
    # store the pickle in the zip file
    tmpzipfile = ZipFile(self.data.fullpathtofile, 'a', ZIP_DEFLATED)
    tmpzipfile.write(picklefilepath, 'Pickle/' + local_fname)
    tmpzipfile.close()

    # remove the pickle file from the file system
    logger.debug('Unlink: %s' % picklefilepath)
    os.unlink(picklefilepath)
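The key ZipFile detail here is mode 'a', which appends members to an existing archive instead of truncating it. When the content is already in memory, writestr can skip the temporary file entirely; a tiny sketch where 'container.zip', the member path, and the payload are all placeholders:

from zipfile import ZipFile, ZIP_DEFLATED

with ZipFile('container.zip', 'a', ZIP_DEFLATED) as zf:
    # add a member under a folder-like prefix without touching existing ones
    zf.writestr('Pickle/graph.gpickle', b'serialized-bytes-here')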
Example 6: aqcuire_all_resources
def aqcuire_all_resources(self, format_dict):
    import cStringIO as StringIO
    from zipfile import ZipFile

    # Download archive.
    url = self.url(format_dict)
    shapefile_online = self._urlopen(url)
    zfh = ZipFile(StringIO.StringIO(shapefile_online.read()), 'r')
    shapefile_online.close()

    # Iterate through all scales and levels and extract relevant files.
    modified_format_dict = dict(format_dict)
    scales = ('c', 'l', 'i', 'h', 'f')
    levels = (1, 2, 3, 4)
    for scale, level in itertools.product(scales, levels):
        modified_format_dict.update({'scale': scale, 'level': level})
        target_path = self.target_path(modified_format_dict)
        target_dir = os.path.dirname(target_path)
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)
        for member_path in self.zip_file_contents(modified_format_dict):
            ext = os.path.splitext(member_path)[1]
            target = os.path.splitext(target_path)[0] + ext
            member = zfh.getinfo(member_path)
            with open(target, 'wb') as fh:
                fh.write(zfh.open(member).read())
    zfh.close()
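In Python 3 the cStringIO import becomes io.BytesIO. A self-contained sketch of the download-then-read-a-member step; the URL and member/target names are placeholders:

import io
from urllib.request import urlopen
from zipfile import ZipFile

with urlopen('https://example.com/shapes.zip') as resp:
    zfh = ZipFile(io.BytesIO(resp.read()))

with zfh.open('some/member.shp') as member, open('target.shp', 'wb') as fh:
    fh.write(member.read())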
Example 7: upload_analyzer
def upload_analyzer(self):
    """Upload analyzer to guest.
    @return: operation status.
    """
    zip_data = StringIO()
    zip_file = ZipFile(zip_data, "w", ZIP_DEFLATED)

    root = os.path.join("analyzer", self.platform)
    root_len = len(os.path.abspath(root))

    if not os.path.exists(root):
        log.error("No valid analyzer found at path: %s" % root)
        return False

    for root, dirs, files in os.walk(root):
        archive_root = os.path.abspath(root)[root_len:]
        for name in files:
            path = os.path.join(root, name)
            archive_name = os.path.join(archive_root, name)
            zip_file.write(path, archive_name, ZIP_DEFLATED)

    zip_file.close()
    data = xmlrpclib.Binary(zip_data.getvalue())
    zip_data.close()

    log.debug("Uploading analyzer to guest (ip=%s)" % self.ip)
    self.server.add_analyzer(data)
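As an aside: when the archive can live on disk rather than in memory, the standard library's shutil.make_archive wraps this whole walk-and-write loop in one call. A one-line sketch, with 'analyzer/windows' as a placeholder directory:

import shutil

# creates analyzer.zip from the directory tree rooted at analyzer/windows
# and returns the path of the archive it wrote
archive_path = shutil.make_archive('analyzer', 'zip', root_dir='analyzer/windows')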
Example 8: getTranslations
def getTranslations(type, localesDir, defaultLocale, projectName, key):
    result = urllib2.urlopen('http://api.crowdin.net/api/project/%s/export?key=%s' % (projectName, key)).read()
    if result.find('<success') < 0:
        raise Exception('Server indicated that the operation was not successful\n' + result)

    result = urllib2.urlopen('http://api.crowdin.net/api/project/%s/download/all.zip?key=%s' % (projectName, key)).read()
    zip = ZipFile(StringIO(result))
    dirs = {}
    for info in zip.infolist():
        if not info.filename.endswith('.json'):
            continue

        dir, file = os.path.split(info.filename)
        if not re.match(r'^[\w\-]+$', dir) or dir == defaultLocale:
            continue
        if type == 'chrome' and file.count('.') == 1:
            origFile = file
        else:
            origFile = re.sub(r'\.json$', '', file)
        if type == 'gecko' and not origFile.endswith('.dtd') and not origFile.endswith('.properties'):
            continue

        mapping = langMappingChrome if type == 'chrome' else langMappingGecko
        for key, value in mapping.iteritems():
            if value == dir:
                dir = key
        if type == 'chrome':
            dir = dir.replace('-', '_')

        data = zip.open(info.filename).read()
        if data == '[]':
            continue

        if not dir in dirs:
            dirs[dir] = set()
        dirs[dir].add(origFile)

        path = os.path.join(localesDir, dir, origFile)
        if not os.path.exists(os.path.dirname(path)):
            os.makedirs(os.path.dirname(path))
        if type == 'chrome' and origFile.endswith('.json'):
            postprocessChromeLocale(path, data)
        elif type == 'chrome':
            data = json.loads(data)
            if origFile in data:
                fileHandle = codecs.open(path, 'wb', encoding='utf-8')
                fileHandle.write(data[origFile]['message'])
                fileHandle.close()
        else:
            fromJSON(path, data)

    # Remove any extra files
    for dir, files in dirs.iteritems():
        baseDir = os.path.join(localesDir, dir)
        if not os.path.exists(baseDir):
            continue
        for file in os.listdir(baseDir):
            path = os.path.join(baseDir, file)
            if os.path.isfile(path) and (file.endswith('.json') or file.endswith('.properties') or file.endswith('.dtd')) and not file in files:
                os.remove(path)
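The ZipFile part of this routine — iterate infolist(), filter by member name, read each member — looks like this as a standalone Python 3 sketch. Note that read() returns bytes, so JSON members need decoding; 'all.zip' is a placeholder archive:

import json
from zipfile import ZipFile

with ZipFile('all.zip') as zf:
    for info in zf.infolist():
        if not info.filename.endswith('.json'):
            continue
        data = json.loads(zf.read(info.filename).decode('utf-8'))
        print(info.filename, len(data))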
Example 9: parse_template
def parse_template(template_name):
    """Resolve template name into absolute path to the template
    and boolean if absolute path is temporary directory.
    """
    if template_name.startswith('http'):
        if '#' in template_name:
            url, subpath = template_name.rsplit('#', 1)
        else:
            url = template_name
            subpath = ''
        with tempfile.NamedTemporaryFile() as tmpfile:
            urlretrieve(url, tmpfile.name)
            if not is_zipfile(tmpfile.name):
                raise ConfigurationError("Not a zip file: %s" % tmpfile.name)
            zf = ZipFile(tmpfile)
            try:
                path = tempfile.mkdtemp()
                zf.extractall(path)
                return os.path.join(path, subpath), True
            finally:
                zf.close()

    registry = TemplatesRegistry()
    if registry.has_template(template_name):
        path = registry.path_of_template(template_name)
    elif ':' in template_name:
        path = resolve_dotted_path(template_name)
    else:
        path = os.path.realpath(template_name)

    if not os.path.isdir(path):
        raise ConfigurationError('Template directory does not exist: %s' % path)
    return path, False
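The validate-then-extract idiom used above, shown on its own. urlretrieve called without a filename downloads to a temporary file and returns its path; the URL is a placeholder:

import tempfile
from urllib.request import urlretrieve
from zipfile import ZipFile, is_zipfile

path, _ = urlretrieve('https://example.com/template.zip')
if not is_zipfile(path):
    raise ValueError('Not a zip file: %s' % path)

dest = tempfile.mkdtemp()
with ZipFile(path) as zf:
    zf.extractall(dest)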
Example 10: download_unzip
def download_unzip(input_zip):
    url = urllib.urlopen(input_zip)
    unzipped_string = ''
    zipfile = ZipFile(StringIO(url.read()))
    for name in zipfile.namelist():
        unzipped_string += zipfile.open(name).read()
    return unzipped_string
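A Python 3 equivalent needs urllib.request and a bytes accumulator, since zip members are read as bytes rather than str:

from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile

def download_unzip(input_zip):
    with urlopen(input_zip) as resp:
        zf = ZipFile(BytesIO(resp.read()))
    # concatenate the raw contents of every member
    return b''.join(zf.read(name) for name in zf.namelist())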
Example 11: get_sightings_from_atlas
def get_sightings_from_atlas(uri, species_ids):
    # Create a dict of sightings.
    # Each species ID will have a list of sightings as [lat, long].
    sightings = dict()
    for species_id in species_ids:
        sightings[species_id] = []

    # Column indices in the CSV.
    LONG = 0
    LAT = 1
    LSID = 2

    # Download the API call result and unzip it.
    url = urlopen(uri)
    zipfile = ZipFile(StringIO(url.read()))

    # Skip the header row using [1:].
    for line in zipfile.open("data.csv").readlines()[1:]:
        sighting_record = line.split(",")
        sightings[sighting_record[LSID][1:-2]].append(
            [sighting_record[LAT][1:-1], sighting_record[LONG][1:-1]])

    for species_id in species_ids:
        # Don't return too many sightings for a single species.
        sightings[species_id] = sightings[species_id][0:species_sighting_limit]
        # Prune any empty entries.
        if sightings[species_id] == []:
            del sightings[species_id]

    return sightings
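Hand-splitting CSV lines and trimming quotes with slice indices is fragile; the csv module can parse a zip member directly once the binary stream is wrapped as text. A sketch of that parsing step, assuming the same three-column data.csv layout as above (longitude, latitude, species id); 'sightings.zip' is a placeholder:

import csv
import io
from zipfile import ZipFile

with ZipFile('sightings.zip') as zf:
    with zf.open('data.csv') as raw:
        reader = csv.reader(io.TextIOWrapper(raw, encoding='utf-8', newline=''))
        header = next(reader)  # skip the header row
        for longitude, latitude, species_id in reader:
            print(species_id, latitude, longitude)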
Example 12: export_zip
def export_zip(self, paths):
    stringio = StringIO()
    archive = ZipFile(stringio, mode='w')

    def _add_resource(resource):
        for filename in resource.get_files_to_archive(True):
            if filename.endswith('.metadata'):
                continue
            path = Path(self.handler.key).get_pathto(filename)
            archive.writestr(str(path), resource.handler.to_str())

    for path in paths:
        child = self.get_resource(path, soft=True)
        if child is None:
            continue
        # A Folder => we add its content
        if isinstance(child, Folder):
            for subchild in child.traverse_resources():
                if subchild is None or isinstance(subchild, Folder):
                    continue
                _add_resource(subchild)
        else:
            _add_resource(child)

    archive.close()
    return stringio.getvalue()
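The notable call here is writestr, which writes generated content straight into the archive under a chosen member path, with no intermediate file on disk. A minimal self-contained sketch (member path and payload are made up for illustration):

from io import BytesIO
from zipfile import ZipFile

stream = BytesIO()
with ZipFile(stream, mode='w') as archive:
    # a str payload is encoded as UTF-8; bytes are written as-is
    archive.writestr('docs/readme.txt', 'generated on the fly')
zip_bytes = stream.getvalue()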
Example 13: createDevEnv
def createDevEnv(baseDir, type):
    fileBuffer = StringIO()
    createBuild(baseDir, type=type, outFile=fileBuffer, devenv=True, releaseBuild=True)

    from zipfile import ZipFile
    zip = ZipFile(StringIO(fileBuffer.getvalue()), 'r')
    zip.extractall(os.path.join(baseDir, 'devenv'))
    zip.close()

    print 'Development environment created, waiting for connections from active extensions...'
    metadata = readMetadata(baseDir, type)
    connections = [0]

    import SocketServer, time, thread

    class ConnectionHandler(SocketServer.BaseRequestHandler):
        def handle(self):
            connections[0] += 1
            self.request.sendall('HTTP/1.0 OK\nConnection: close\n\n%s' % metadata.get('general', 'basename'))

    server = SocketServer.TCPServer(('localhost', 43816), ConnectionHandler)

    def shutdown_server(server):
        time.sleep(10)
        server.shutdown()
    thread.start_new_thread(shutdown_server, (server,))
    server.serve_forever()

    if connections[0] == 0:
        print 'Warning: No incoming connections, extension probably not active in the browser yet'
    else:
        print 'Handled %i connection(s)' % connections[0]
Example 14: get_info
def get_info(in_stream):
    """ Return the version and submitter strings from zipfile byte stream. """
    arch = ZipFile(in_stream, 'r')
    try:
        return unpack_info(arch.read('__INFO__'))
    finally:
        arch.close()
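The try/finally pair is exactly what the context-manager form expresses, and read(name) returns the member's bytes. A self-contained round-trip demonstrating both (the __INFO__ payload here is made up for illustration):

from io import BytesIO
from zipfile import ZipFile

buf = BytesIO()
with ZipFile(buf, 'w') as zf:
    zf.writestr('__INFO__', b'version=1.0;submitter=alice')

with ZipFile(BytesIO(buf.getvalue()), 'r') as zf:
    print(zf.read('__INFO__'))  # b'version=1.0;submitter=alice'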
Example 15: load_property_inspection
def load_property_inspection():
    """
    Loads and returns several variables for the data set from Kaggle's
    Property Inspection Prediction competition.
    Link: https://www.kaggle.com/c/liberty-mutual-group-property-inspection-prediction

    Returns
    -------
    data : array-like
        Pandas data frame containing the entire data set.
    X : array-like
        Training input samples.
    y : array-like
        Target values.
    """
    file_location = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data', 'property_inspection.zip')
    z = ZipFile(file_location)
    data = pd.read_csv(z.open('train.csv'))
    data = data.set_index('Id')

    X = data.iloc[:, 1:].values
    y = data.iloc[:, 0].values

    # transform the categorical variables from strings to integers
    encoder = CategoryEncoder()
    X = encoder.fit_transform(X)

    return data, X, y
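When the archive holds exactly one CSV, pandas can also read it without an explicit ZipFile, since read_csv handles zip compression itself; whether that applies here depends on the contents of property_inspection.zip, so treat this as an assumption:

import pandas as pd

# works only if the zip archive contains a single CSV file
data = pd.read_csv('property_inspection.zip', compression='zip', index_col='Id')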