This article collects typical usage examples of the Python os.path.getsize method. If you have been wondering what path.getsize does, or how to use it in practice, the hand-picked code examples below may help. You can also explore the containing module, os.path, for further usage examples.
The sections below present 15 code examples of path.getsize, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
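Before the examples, here is a minimal self-contained illustration of the method itself (hashing nothing fancy, it just sizes the running script):

from os import path

# path.getsize returns a file's size in bytes; it raises OSError
# if the path does not exist or is inaccessible.
size = path.getsize(__file__)
print('%s is %d bytes' % (__file__, size))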
Example 1: list
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def list(self):
    dirs = []
    # TODO: error management
    for d in listdir(self.datapath):
        dp = path.join(self.datapath, d)
        if path.isdir(dp):
            dirs.append({
                "name": d,
                "path": dp,
                "mtime": datetime.datetime.fromtimestamp(path.getmtime(dp)).strftime('%d.%m.%Y %H:%M:%S')
                # "size": path.getsize(dp),
                # "hsize": self.human_size(self.get_size(dp))
            })
    dirs.sort(key=lambda entry: entry['name'], reverse=True)
    return {'status': 'success', 'data': dirs}
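The "size" fields above are commented out because calling getsize() on a directory returns the size of the directory entry itself, not of its contents. A sketch of what the get_size() helper referenced above might look like (an assumption; the snippet does not show it):

from os import walk
from os.path import getsize, join

def get_size(start):
    # Sum the sizes of all regular files under `start`, recursively.
    return sum(getsize(join(root, f))
               for root, _, files in walk(start)
               for f in files)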
Example 2: __init__
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def __init__(self, path, split, tokenizer, bucket_size, ascending=False):
    # Setup
    self.path = path
    self.bucket_size = bucket_size
    # List all wave files
    file_list = []
    for s in split:
        split_list = list(Path(join(path, s)).rglob("*.flac"))
        assert len(split_list) > 0, "No data found @ {}".format(join(path, s))
        file_list += split_list
    # Read text
    text = Parallel(n_jobs=READ_FILE_THREADS)(
        delayed(read_text)(str(f)) for f in file_list)
    # text = Parallel(n_jobs=-1)(delayed(tokenizer.encode)(txt) for txt in text)
    text = [tokenizer.encode(txt) for txt in text]
    # Sort dataset by text length
    # file_len = Parallel(n_jobs=READ_FILE_THREADS)(delayed(getsize)(f) for f in file_list)
    self.file_list, self.text = zip(*[(f_name, txt)
                                      for f_name, txt in sorted(zip(file_list, text),
                                                                reverse=not ascending,
                                                                key=lambda x: len(x[1]))])
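Because the pairs are sorted by tokenized text length, batches can be cut as contiguous buckets of similarly long utterances, which minimizes padding. A hypothetical sketch of how such a dataset might serve one bucket (the class's __getitem__ is not shown in the snippet):

def __getitem__(self, index):
    # Return `bucket_size` consecutive (file, text) pairs; neighbours
    # have similar text lengths thanks to the sort in __init__.
    return list(zip(self.file_list[index:index + self.bucket_size],
                    self.text[index:index + self.bucket_size]))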
Example 3: verify
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def verify(self, sha1=None):
    _dlsize = getsize(self._destination)
    if _dlsize != self.get_size():
        raise FDSizeMismatch(_dlsize, self._fname, self.get_size())
    if not sha1:
        return
    dlsha1 = None
    try:
        result = util.exec_command(["sha1sum", self._destination])
        dlsha1 = result['out']
    except OSError:
        try:
            result = util.exec_command(
                ["shasum", "-a", "1", self._destination])
            dlsha1 = result['out']
        except OSError:
            pass
    if dlsha1:
        # some sha1sum builds escape the output line with a leading backslash
        dlsha1 = dlsha1[1:41] if dlsha1.startswith("\\") else dlsha1[:40]
        if sha1 != dlsha1:
            raise FDSHASumMismatch(dlsha1, self._fname, sha1)
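Shelling out to sha1sum/shasum works on most Unix-like systems, but a portable alternative is to hash the file in-process. A sketch using only the standard library (not the project's own code):

import hashlib

def file_sha1(filename, chunk_size=64 * 1024):
    # Hash in chunks so a large download is never loaded into memory at once.
    h = hashlib.sha1()
    with open(filename, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()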
Example 4: clear_cache
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def clear_cache(force=False):
    """
    If the folder exists, and has more than 5MB of icons in the cache, delete
    it to clear all the icons, then recreate it.
    """
    from os.path import getsize, join, isfile, exists
    from os import makedirs, listdir
    from sublime import cache_path
    from shutil import rmtree

    # The icon cache path
    icon_path = join(cache_path(), "GutterColor")
    # The maximum amount of space to take up
    limit = 5242880  # 5 MB

    if exists(icon_path):
        size = sum(getsize(join(icon_path, f))
                   for f in listdir(icon_path) if isfile(join(icon_path, f)))
        if force or (size > limit):
            rmtree(icon_path)

    if not exists(icon_path):
        makedirs(icon_path)
Example 5: _read
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def _read(filename):
    """ Reads Fortran style binary data into numpy array
    """
    nbytes = getsize(filename)
    with open(filename, 'rb') as file:
        # read size of record
        file.seek(0)
        n = np.fromfile(file, dtype='int32', count=1)[0]
        if n == nbytes - 8:
            file.seek(4)
            data = np.fromfile(file, dtype='float32')
            return data[:-1]
        else:
            file.seek(0)
            data = np.fromfile(file, dtype='float32')
            return data
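The function relies on the Fortran unformatted-sequential layout: a 4-byte int32 record length, the payload, then the same length repeated as a trailer (hence the n == nbytes - 8 check, and the data[:-1] that drops the reinterpreted trailer). A round-trip check under those assumptions (test.bin is a scratch file):

import numpy as np

payload = np.arange(5, dtype='float32')
reclen = np.array([payload.nbytes], dtype='int32')
with open('test.bin', 'wb') as f:
    reclen.tofile(f)   # 4-byte record-length header
    payload.tofile(f)  # float32 payload
    reclen.tofile(f)   # 4-byte record-length trailer
assert np.array_equal(_read('test.bin'), payload)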
Example 6: _scan_dynamodb_and_upload_to_s3
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def _scan_dynamodb_and_upload_to_s3(self, temp_file, scan_kwargs, table):
    while True:
        response = table.scan(**scan_kwargs)
        items = response['Items']
        for item in items:
            temp_file.write(self.process_func(item))

        if 'LastEvaluatedKey' not in response:
            # no more items to scan
            break

        last_evaluated_key = response['LastEvaluatedKey']
        scan_kwargs['ExclusiveStartKey'] = last_evaluated_key

        # Upload the file to S3 once it reaches the size limit
        if getsize(temp_file.name) >= self.file_size:
            _upload_file_to_s3(temp_file, self.s3_bucket_name,
                               self.s3_key_prefix)
            temp_file.close()
            temp_file = NamedTemporaryFile()
    return temp_file
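Note that getsize() inspects the file on disk, so data still sitting in the write buffer is invisible to it; flushing first makes the check accurate. A minimal sketch of the same size-based rotation pattern, with a hypothetical upload() callable standing in for _upload_file_to_s3:

from os.path import getsize
from tempfile import NamedTemporaryFile

def rotate_if_full(temp_file, limit, upload):
    temp_file.flush()  # make buffered writes count toward getsize()
    if getsize(temp_file.name) >= limit:
        upload(temp_file)
        temp_file.close()
        temp_file = NamedTemporaryFile()
    return temp_file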
Example 7: test_write_gff_file
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def test_write_gff_file(self, seqprop_with_i, tmpdir):
    """Test writing the features, and that features are now loaded from a file"""
    outpath = tmpdir.join('test_seqprop_with_i_write_gff_file.gff').strpath
    seqprop_with_i.write_gff_file(outfile=outpath, force_rerun=True)
    # Test that the file was written
    assert op.exists(outpath)
    assert op.getsize(outpath) > 0
    # Test that file paths are correct
    assert seqprop_with_i.feature_path == outpath
    assert seqprop_with_i.feature_file == 'test_seqprop_with_i_write_gff_file.gff'
    assert seqprop_with_i.feature_dir == tmpdir
    # Test that features cannot be changed
    with pytest.raises(ValueError):
        seqprop_with_i.features = ['NOFEATURES']
Example 8: traverse_dir
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def traverse_dir(path):
    file_dict = {}
    dir_dict = {}
    count = 1
    for root, dirs, files in walk(path):
        for d in dirs:
            abs_p = join(root, d)
            dir_dict[abs_p] = 0
            print(abs_p)
            count += 1
            if count % 200 == 0:
                print('%s files scanned' % count)
        for f in files:
            abs_p = join(root, f)
            file_dict[abs_p] = getsize(abs_p)
            print(abs_p)
            count += 1
            if count % 200 == 0:
                print('%s files scanned' % count)
    return file_dict, dir_dict
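Example use of the helper (the path is illustrative): total up the file sizes it collected.

files, dirs = traverse_dir('/tmp')
total_bytes = sum(files.values())
print('%d files, %d bytes in total' % (len(files), total_bytes))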
Example 9: get_sha1_by_slice
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def get_sha1_by_slice(file_name, slice_size):
    """ Get SHA array based on Qcloud Slice Upload Interface
    :param file_name: local file path
    :param slice_size: slice size in bytes
    :return: sha array like [{"offset": 0, "datalen": 1024, "datasha": "aaa"}, {}, {}]
    """
    from os import path
    with open(file_name, 'rb') as f:
        result = []
        file_size = path.getsize(file_name)
        sha1_obj = Sha1Hash()
        for current_offset in range(0, file_size, slice_size):
            data_length = min(slice_size, file_size - current_offset)
            sha1_obj.update(f.read(data_length))
            sha1_val = sha1_obj.inner_digest()
            result.append({"offset": current_offset, "datalen": data_length, "datasha": sha1_val})
        result[-1]['datasha'] = sha1_obj.hexdigest()
        return result
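Because the final entry's 'datasha' is overwritten with the cumulative digest, the last slice carries the whole file's SHA-1. A quick consistency check against hashlib (the file name is illustrative, and Sha1Hash is assumed to be SHA-1 compatible):

import hashlib

slices = get_sha1_by_slice('movie.mp4', 1024 * 1024)
with open('movie.mp4', 'rb') as f:
    assert slices[-1]['datasha'] == hashlib.sha1(f.read()).hexdigest()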
Example 10: _close
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def _close(self, file):
    f = self.handles[file]
    del self.handles[file]
    if file in self.whandles:
        del self.whandles[file]
        f.flush()
        self.unlock_file(file, f)
        f.close()
        if os.path.isfile(file):
            self.tops[file] = getsize(file)
            self.mtimes[file] = getmtime(file)
        else:
            if DEBUG:
                log(self.log_prefix + '_close: missing file', file)
            self.tops[file] = 0
            self.mtimes[file] = 0
    else:
        if self.lock_while_reading:
            self.unlock_file(file, f)
        f.close()
Example 11: enable_file
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def enable_file(self, f):
    if self.config['encrypted_storage']:
        return
    if not self.disabled[f]:
        return
    self.disabled[f] = False
    r = self.file_ranges[f]
    if not r:
        return
    file = r[3]
    if not exists(file):
        h = open(file, 'wb+')
        h.flush()
        h.close()
    if file not in self.tops:
        self.tops[file] = getsize(file)
    if file not in self.mtimes:
        self.mtimes[file] = getmtime(file)
    self.working_ranges[f] = [r]
    if DEBUG:
        log(self.log_prefix + 'enable_file: f:', f, 'self.working_ranges:', self.working_ranges)
Example 12: disable_file
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def disable_file(self, f):
    if self.config['encrypted_storage']:
        return
    if self.disabled[f]:
        return
    self.disabled[f] = True
    r = self._get_disabled_ranges(f)
    if not r:
        return
    for file, begin, end in r[2]:
        if not os.path.isdir(self.bufferdir):
            os.makedirs(self.bufferdir)
        if not exists(file):
            h = open(file, 'wb+')
            h.flush()
            h.close()
        if file not in self.tops:
            self.tops[file] = getsize(file)
        if file not in self.mtimes:
            self.mtimes[file] = getmtime(file)
    self.working_ranges[f] = r[0]
Example 13: pickle
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def pickle(self):
    files = []
    pfiles = []
    for i in range(len(self.files)):
        if not self.files[i][1]:
            continue
        if self.disabled[i]:
            for file, start, end in self._get_disabled_ranges(i)[2]:
                pfiles.extend([basename(file), getsize(file), getmtime(file)])
            continue
        file = self.files[i][0].encode('utf-8')
        files.extend([i, getsize(file), getmtime(file)])
    return {'files': files,
            'partial files': pfiles}
Example 14: rinexobs
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def rinexobs(obsfn, writeh5=None, maxtimes=None):
    stem, ext = splitext(expanduser(obsfn))
    if ext[-1].lower() == 'o':  # raw text file
        with open(obsfn, 'r') as f:
            t = time.time()
            lines = f.read().splitlines(True)
            lines.append('')
            header, version, headlines, obstimes, sats, svset = scan(lines)
            print('{} is a RINEX {} file, {} kB.'.format(obsfn, version, getsize(obsfn) / 1000.0))
            data = processBlocks(lines, header, obstimes, svset, headlines, sats)
            print("finished in {0:.2f} seconds".format(time.time() - t))
        #%% save to disk (optional)
        if writeh5:
            h5fn = stem + '.h5'
            print('saving OBS data to {}'.format(h5fn))
            data.to_hdf(h5fn, key='OBS', mode='a', complevel=6, append=False)
    elif ext.lower() == '.h5':
        data = read_hdf(obsfn, key='OBS')
        print('loaded OBS data from {} to {}'.format(data.items[0], data.items[-1]))
    return data
# this will scan the document for the header info and for the line on
# which each block starts
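A hypothetical call (the file name is illustrative): parse a RINEX 2 observation file and cache the result to HDF5 next to the original.

obs = rinexobs('site0010.15o', writeh5=True)  # also writes site0010.h5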
Example 15: __init__
# Required import: from os import path [as alias]
# Alternatively: from os.path import getsize [as alias]
def __init__(self, addr=0, size=None, file=None, process=True):
    self.init()
    if not size:
        size = ospath_getsize(file)
    self.file_size = size
    self.file = file
    self.addr = addr
    self.size = size
    self.type = 'TGA'
    self.pallete = None
    self.Read_header()
    self.flipped = False
    if self.header['ImageDescriptor'] & 32:
        self.flipped = True
    self.alpha_bits = self.header['ImageDescriptor'] & 15
    self.Get_image_dimensions()
    self.size_of_plane = self.width * self.height
    self.sourceBpp = self.header['BitsPerPixel'] // 8  # integer bytes per pixel
    self.data_size = self.width * self.height * self.sourceBpp
    self.Bps = self.width * self.sourceBpp
    self.Bpp = 4
    self.final_size = self.size_of_plane * self.Bpp
    self.RGBA = None
    if not process:
        return
    self.Process()
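Hypothetical usage, assuming the class is named TGA (the class statement is not shown in the snippet) and that texture.tga exists:

img = TGA(file='texture.tga')  # size is derived via getsize when omitted
print(img.width, img.height, img.final_size)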