This article collects typical usage examples of the scandir.walk method in Python. If you have been wondering what scandir.walk does, how to call it, or what real-world uses look like, the curated examples below may help. You can also explore further usage examples from the scandir module, which provides this method.
The 13 code examples of scandir.walk shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
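Before diving in, here is a minimal sketch of the typical drop-in usage. scandir.walk takes the same arguments and yields the same (dirpath, dirnames, filenames) tuples as os.walk; on Python < 3.5 it is simply much faster, while on Python >= 3.5 os.walk is already scandir-based. The try/except import is a common hedge and is an assumption of this sketch, not part of any example below:

import os

try:
    from scandir import walk  # external package: pip install scandir (Python < 3.5)
except ImportError:
    from os import walk       # Python >= 3.5: os.walk already uses scandir internally

# walk() yields one (dirpath, dirnames, filenames) tuple per directory visited.
for dirpath, dirnames, filenames in walk('.'):
    for name in filenames:
        print(os.path.join(dirpath, name))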
Example 1: _cleanup_pyc
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def _cleanup_pyc(recipe_deps):
    """Removes any .pyc files from the recipes/recipe_module directories.

    Args:
      * recipe_deps (RecipeDeps) - The loaded recipe dependencies.
    """
    for repo in recipe_deps.repos.itervalues():
        for relpath in ('recipes', 'recipe_modules'):
            to_walk = os.path.join(repo.recipes_root_path, relpath)
            for root, _dirs, files in OS_WALK(to_walk):
                for fname in files:
                    if not fname.endswith('.pyc'):
                        continue
                    try:
                        to_clean = os.path.join(root, fname)
                        LOG.info('cleaning %r', to_clean)
                        os.unlink(to_clean)
                    except OSError as ex:
                        # If multiple things are cleaning pyc's at the same time
                        # this can race. Fortunately we only care that SOMETHING
                        # deleted the pyc :)
                        if ex.errno != errno.ENOENT:
                            raise
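The same cleanup pattern works outside the recipe engine. A self-contained sketch, assuming only the standard library plus the scandir package; the path in the final comment is hypothetical:

import errno
import os

try:
    from scandir import walk
except ImportError:
    from os import walk

def cleanup_pyc(base_dir):
    """Delete every .pyc file below base_dir, tolerating concurrent deletes."""
    for root, _dirs, files in walk(base_dir):
        for fname in files:
            if not fname.endswith('.pyc'):
                continue
            try:
                os.unlink(os.path.join(root, fname))
            except OSError as ex:
                # Another process may have removed the file first; that's fine.
                if ex.errno != errno.ENOENT:
                    raise

# cleanup_pyc('/path/to/project')  # hypothetical path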
Example 2: find_results
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def find_results(location, pattern):
    """ Create list of result files and return sorted

    Args:
        location (str): directory location to search
        pattern (str): glob style search pattern for results

    Returns:
        results (list): list of file paths for results found

    """
    # Note: already checked for location existence in main()
    records = []
    for root, dirnames, filenames in walk(location):
        for filename in fnmatch.filter(filenames, pattern):
            records.append(os.path.join(root, filename))

    if len(records) == 0:
        raise IOError('Could not find results in: %s' % location)

    records.sort()

    return records
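The core of this example is fnmatch.filter, which applies a shell-style glob to the filename list yielded at each walk step. A minimal self-contained variant; the pattern and search root in the demo line are illustrative:

import fnmatch
import os

try:
    from scandir import walk
except ImportError:
    from os import walk

def find_files(location, pattern):
    # fnmatch.filter() keeps only the names matching the glob pattern.
    return sorted(
        os.path.join(root, name)
        for root, _dirs, names in walk(location)
        for name in fnmatch.filter(names, pattern)
    )

# find_files('.', '*.py')  # e.g. every Python file under the current directory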
Example 3: rules
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def rules(self):
    arg_edit = self.args.edit

    # Retrieve the list of rules and populate a list.
    rules = []
    count = 1
    for folder, folders, files in walk(self.rule_path):
        for file_name in files:
            rules.append([count, os.path.join(folder, file_name)])
            count += 1

    # If the user wants to edit a specific rule, loop through all of them,
    # identify which one to open, and launch the default editor.
    if arg_edit:
        for rule in rules:
            if int(arg_edit) == rule[0]:
                os.system('"${EDITOR:-nano}" ' + rule[1])
                break
    # Otherwise, just print the list.
    else:
        self.log('table', dict(header=['#', 'Path'], rows=rules))
        self.log('', "")
        self.log('', "You can edit these rules by specifying --edit and the #")
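The numbering pattern here, assigning a 1-based index to every file found, can be written without the manual counter. A standalone sketch using enumerate; the directory in the usage comment is hypothetical:

import os

try:
    from scandir import walk
except ImportError:
    from os import walk

def numbered_files(root_dir):
    """Return (index, path) pairs for every file below root_dir."""
    paths = (
        os.path.join(folder, name)
        for folder, _subfolders, names in walk(root_dir)
        for name in names
    )
    # enumerate(..., 1) reproduces the 1-based numbering of the manual counter.
    return list(enumerate(paths, 1))

# for idx, path in numbered_files('rules/'):  # hypothetical directory
#     print(idx, path)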
Example 4: paths
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def paths(self):
    """
    Generate XML manifest paths.
    """
    for root, dirs, files in scandir.walk(self.path):
        # Filter out non-XML files.
        xmls = [
            n for n in files
            if os.path.splitext(n)[-1] == '.xml'
        ]
        for name in xmls:
            yield os.path.join(root, name)
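Because paths() is a generator, traversal is lazy: no directory is scanned until iteration begins, and stopping early stops the walk. A module-level sketch of the same idea; the path in the usage comment is hypothetical:

import os

try:
    from scandir import walk
except ImportError:
    from os import walk

def iter_xml_paths(base_path):
    """Lazily yield every .xml file below base_path."""
    for root, _dirs, files in walk(base_path):
        for name in files:
            if os.path.splitext(name)[-1] == '.xml':
                yield os.path.join(root, name)

# Iteration drives the walk; next() scans only as far as the first match.
# first_xml = next(iter_xml_paths('/data/manifests'), None)  # hypothetical path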
Example 5: scrape_all_bills
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def scrape_all_bills(bill_data_path, num_workers):
    logging.basicConfig(filename=BILL_SCRAPER_LOG, level=logging.DEBUG)

    bill_file_paths = []
    for dirname, dirnames, filenames in walk(bill_data_path):
        for filename in filenames:
            bill_file_paths.append(os.path.join(dirname, filename))

    scrape_bill_document_from_sunlight(bill_file_paths[0])
    random.shuffle(bill_file_paths)

    pool = multiprocessing.Pool(num_workers)
    print "fetch {0} urls from sunlight...".format(len(bill_file_paths))  # Python 2
    pool.map(scrape_bill_document_from_sunlight, bill_file_paths)
    print "finished fetching urls..."

# open individual json file and scrape bill document,
# from the s3 server provided by sunlight foundation
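The shape of this example, collect paths with walk and then fan the list out to a process pool, ports directly to Python 3. A minimal runnable sketch with a stand-in worker; the real scraper and its URLs are not reproduced here:

import multiprocessing
import os

try:
    from scandir import walk
except ImportError:
    from os import walk

def file_size(path):
    """Stand-in worker; a real one would parse or upload the file."""
    return os.path.getsize(path)

def process_tree(data_path, num_workers=4):
    paths = [
        os.path.join(dirname, name)
        for dirname, _dirs, names in walk(data_path)
        for name in names
    ]
    pool = multiprocessing.Pool(num_workers)
    try:
        sizes = pool.map(file_size, paths)  # one task per file, in parallel
    finally:
        pool.close()
        pool.join()
    print('%d files, %d bytes total' % (len(paths), sum(sizes)))

if __name__ == '__main__':  # guard required where workers are spawned
    process_tree('.')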
Example 6: _compile_protos
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def _compile_protos(proto_files, proto_tree, protoc, argfile, dest):
    """Runs protoc over the collected protos, renames them and rewrites their
    imports to make them import from `PB`.

    Args:
      * proto_files (List[Tuple[src_abspath: str, dest_relpath: str]])
      * proto_tree (str): Path to the directory with all the collected .proto
        files.
      * protoc (str): Path to the protoc binary to use.
      * argfile (str): Path to a protoc argfile containing a relative path to
        every .proto file in proto_tree on its own line.
      * dest (str): Path to the destination where the compiled protos should go.
    """
    protoc_proc = subprocess.Popen(
        [protoc, '--python_out', dest, '@'+argfile],
        cwd=proto_tree, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, _ = protoc_proc.communicate()
    os.remove(argfile)

    if protoc_proc.returncode != 0:
        replacer = _rel_to_abs_replacer(proto_files)
        print >> sys.stderr, "Error while compiling protobufs. Output:\n"
        sys.stderr.write(replacer(output))
        sys.exit(1)

    rewrite_errors = []
    for base, _, fnames in OS_WALK(dest):
        for name in fnames:
            err = _rewrite_and_rename(dest, os.path.join(base, name))
            if err:
                rewrite_errors.append(err)
        with open(os.path.join(base, '__init__.py'), 'wb'):
            pass

    if rewrite_errors:
        print >> sys.stderr, "Error while rewriting generated protos. Output:\n"
        replacer = _rel_to_abs_replacer(proto_files)
        for error in rewrite_errors:
            print >> sys.stderr, replacer(error)
        sys.exit(1)
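One detail worth isolating: the walk loop opens an empty __init__.py in every directory it visits, which turns the generated tree into an importable package. A standalone sketch of that trick; the destination path is hypothetical:

import os

try:
    from scandir import walk
except ImportError:
    from os import walk

def ensure_packages(dest):
    """Touch an __init__.py in every directory under dest."""
    for base, _dirs, _files in walk(dest):
        init_py = os.path.join(base, '__init__.py')
        if not os.path.exists(init_py):
            # Opening in write mode creates the empty file.
            with open(init_py, 'wb'):
                pass

# ensure_packages('build/PB')  # hypothetical generated-code root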
Example 7: list
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def list(self):
    self.log('info', "List of available RAT modules:")
    for folder, folders, files in walk(os.path.join(CIRTKIT_ROOT, 'modules/rats/')):
        for file_name in files:
            if not file_name.endswith('.py') or file_name.startswith('__init__'):
                continue

            self.log('item', os.path.join(folder, file_name))
Example 8: _walk
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def _walk(directory, enable_scandir=False, **kwargs):
    """
    Internal function to return walk generator either from os or scandir

    :param directory: directory to traverse
    :param enable_scandir: on python < 3.5 enable external scandir package
    :param kwargs: arguments to pass to walk function
    :return: walk generator
    """
    walk = os.walk
    if python_version < (3, 5) and enable_scandir:
        import scandir
        walk = scandir.walk
    return walk(directory, **kwargs)
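This dispatcher relies on a python_version tuple defined elsewhere in its library. A self-contained variant can read sys.version_info directly; this is a sketch, not that library's actual code:

import os
import sys

def walk_compat(directory, enable_scandir=False, **kwargs):
    """Return a walk generator, preferring scandir on old interpreters."""
    walk = os.walk
    if sys.version_info < (3, 5) and enable_scandir:
        import scandir  # imported lazily, only when actually requested
        walk = scandir.walk
    return walk(directory, **kwargs)

# for root, dirs, files in walk_compat('.', enable_scandir=True, topdown=True):
#     pass  # keyword arguments are forwarded through **kwargs to the real walk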
Example 9: _find_paths
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def _find_paths(self, current_dir, patterns):
    """Recursively generates absolute paths whose components
    underneath current_dir match the corresponding pattern in
    patterns"""
    pattern = patterns[0]
    patterns = patterns[1:]

    has_wildcard = is_pattern(pattern)
    using_globstar = pattern == "**"

    # This avoids os.listdir() for performance
    if has_wildcard:
        entries = [x.name for x in scandir(current_dir)]
    else:
        entries = [pattern]

    if using_globstar:
        matching_subdirs = map(lambda x: x[0], walk(current_dir))  # Python 2: map() returns a list
    else:
        subdirs = [e for e in entries
                   if os.path.isdir(os.path.join(current_dir, e))]
        matching_subdirs = match_entries(subdirs, pattern)

    # For terminal globstar, add a pattern for all files in subdirs
    if using_globstar and not patterns:
        patterns = ['*']

    if patterns:  # we've still got more directories to traverse
        for subdir in matching_subdirs:
            absolute_path = os.path.join(current_dir, subdir)
            for match in self._find_paths(absolute_path, patterns):
                yield match
    else:  # we've got the last pattern
        if not has_wildcard:
            entries = [pattern + '.wsp', pattern + '.wsp.gz']
        files = [e for e in entries
                 if os.path.isfile(os.path.join(current_dir, e))]
        matching_files = match_entries(files, pattern + '.*')

        for _basename in matching_files + matching_subdirs:
            yield os.path.join(current_dir, _basename)
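This finder depends on graphite-web helpers (is_pattern, match_entries) that are not shown here. The heart of the "**" handling is flattening walk() into a list of all subdirectories; below is a minimal self-contained sketch of recursive matching using fnmatch instead. Note that fnmatch's '*' also crosses '/' boundaries, which makes every pattern behave like a terminal globstar:

import fnmatch
import os

try:
    from scandir import walk
except ImportError:
    from os import walk

def glob_files(root, pattern):
    """Yield files below root whose root-relative path matches pattern."""
    for base, _dirs, files in walk(root):
        for name in files:
            path = os.path.join(base, name)
            if fnmatch.fnmatch(os.path.relpath(path, root), pattern):
                yield path

# list(glob_files('/opt/graphite/storage/whisper', '*.wsp'))  # hypothetical path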
Example 10: _gather_proto_info_from_repo
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def _gather_proto_info_from_repo(repo):
    """Gathers all protos from the given repo.

    Args:
      * repo (RecipeRepo) - The repo to gather all protos from.

    Returns List[_ProtoInfo]
    """
    # Tuples of
    #   * fwd-slash path relative to repo.path of where to look for protos.
    #   * fwd-slash namespace prefix of where these protos should go in the
    #     global namespace.
    pre = repo.simple_cfg.recipes_path
    if pre and not pre.endswith('/'):
        pre += '/'
    scan_path = [
        (pre+'recipes/', 'recipes/%s/' % repo.name),
        (pre+'recipe_modules/', 'recipe_modules/%s/' % repo.name),
        (pre+'recipe_proto/', ''),
    ]
    if repo.name == 'recipe_engine':
        scan_path.append((pre+'recipe_engine/', 'recipe_engine/'))

    ret = []
    for scan_relpath, dest_namespace in scan_path:
        for base, dirs, fnames in OS_WALK(os.path.join(repo.path, scan_relpath)):
            base = str(base)  # base can be unicode

            # Skip all '.expected' directories.
            dirs[:] = [dname for dname in dirs if not dname.endswith('.expected')]

            # fwd-slash relative-to-repo.path version of `base`
            relbase = _to_posix(os.path.relpath(base, repo.path))

            for fname in fnames:
                fname = str(fname)  # fname can be unicode
                if not fname.endswith('.proto'):
                    continue
                ret.append(_ProtoInfo.create(
                    repo, scan_relpath, dest_namespace, posixpath.join(relbase, fname)
                ))
    return ret

# This is the version # of the proto generation algorithm, and is mixed into
# the checksum. If you need to change the compilation algorithm/process in any
# way, you should increment this version number to cause all protos to be
# regenerated downstream.
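The dirs[:] = [...] slice assignment above is the standard way to prune a top-down walk: mutating the list in place tells walk() not to descend into the removed directories, whereas rebinding with dirs = [...] would have no effect. A tiny sketch isolating the trick:

import os

try:
    from scandir import walk
except ImportError:
    from os import walk

def walk_pruned(root, skip_suffix='.expected'):
    for base, dirs, files in walk(root):
        # Slice assignment mutates the very list walk() recurses on,
        # so the pruned directories are never visited.
        dirs[:] = [d for d in dirs if not d.endswith(skip_suffix)]
        yield base, dirs, files

# for base, dirs, files in walk_pruned('.'):
#     print(base)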
Example 11: example_timeseries
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def example_timeseries(request):
    """ Extract example timeseries returning a dictionary of dataset attributes
    """
    path = mkdtemp('_yatsm')
    tgz = os.path.join(here, 'data', 'p035r032_testdata.tar.gz')
    with tarfile.open(tgz) as tgz:
        tgz.extractall(path)
    request.addfinalizer(partial(shutil.rmtree, path))

    # Find data
    subset_path = os.path.join(path, 'p035r032', 'images')
    stack_images, stack_image_IDs = [], []
    for root, dnames, fnames in walk(subset_path):
        for fname in fnmatch.filter(fnames, 'L*stack.gtif'):
            stack_images.append(os.path.join(root, fname))
            stack_image_IDs.append(os.path.basename(root))
    stack_images = np.asarray(stack_images)
    stack_image_IDs = np.asarray(stack_image_IDs)

    # Formulate "images.csv" input_file
    input_file = os.path.join(path, 'images.csv')
    dates = np.array([_d[9:16] for _d in stack_image_IDs])  # YYYYDOY
    sensors = np.array([_id[0:3] for _id in stack_image_IDs])  # Landsat IDs
    df = pd.DataFrame({
        'date': dates,
        'sensor': sensors,
        'filename': stack_images
    })
    # Sort by date
    pd_ver = pd.__version__.split('.')
    if pd_ver[0] == '0' and int(pd_ver[1]) < 17:
        df = df.sort(columns='date')
    else:
        df = df.sort_values(by='date')
    df.to_csv(input_file, index=False)

    # Copy configuration file
    dest_config = os.path.join(path, os.path.basename(yaml_config))
    config = yaml.load(open(yaml_config))
    config['dataset']['input_file'] = input_file
    config['dataset']['output'] = os.path.join(path, 'YATSM')
    config['dataset']['cache_line_dir'] = os.path.join(path, 'cache')
    config['classification']['training_image'] = example_training
    yaml.dump(config, open(dest_config, 'w'))

    return {
        'path': subset_path,
        'images': stack_images,
        'image_IDs': stack_image_IDs,
        'input_file': input_file,
        'images.csv': df,
        'config': dest_config,
    }
Example 12: test_bill_extractors
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def test_bill_extractors():
    base_path = "/mnt/data/sunlight/dssg/scraped_bills/"
    state_codes = os.listdir("/mnt/data/sunlight/dssg/scraped_bills/")

    for state_code in state_codes:
        data_path = "{0}/{1}".format(base_path, state_code)

        bill_files = []
        for dirname, dirnames, filenames in walk(data_path):
            for filename in filenames:
                bill_files.append(os.path.join(dirname, filename))
        random.shuffle(bill_files)

        num_tests = 10
        num_errors = 0
        for i, bill_file in enumerate(bill_files[0:num_tests]):
            json_obj = ujson.decode(open(bill_file).read())
            try:
                bill_document = base64.b64decode(json_obj['versions'][0]['bill_document'])
            except:
                num_tests -= 1
                continue
            try:
                mimetype = json_obj['versions'][0]['mimetype']
            except KeyError:
                mimetype = json_obj['versions'][0]['+mimetype']

            bill_text = bill_text_extractor(state_code, bill_document, mimetype,
                                            json_obj['versions'][0]['url'])
            if bill_text is None:
                num_errors += 1

        if 100 * (1 - (num_errors / num_tests)) < 100.0:
            output = "passed {:.2f}% number of tests for state {:s} with {:d} tests".format(
                100 * (1 - (num_errors / num_tests)), state_code, num_tests)
            print output.upper()  # Python 2
Example 13: _get_stats
# Required module: import scandir [as alias]
# Or: from scandir import walk [as alias]
def _get_stats(self, directory, dirtagname, subdirtagname,
               subdirtagname_regex, pattern, recurse):
    orig_dirtags = [dirtagname + ":%s" % directory]
    pat = re.compile(subdirtagname_regex)

    # Initialize state for subdirectories
    subdirs = {}
    for root, dirs, files in walk(directory):
        if root == directory:
            for d in dirs:
                subdir_path = join(root, d)
                if subdirtagname_regex:
                    m = pat.match(d)
                    if m:
                        # Subdir matches
                        tags = ["%s:%s" % (tagname, tagvalue)
                                for tagname, tagvalue in m.groupdict().iteritems()]
                        subdirs[subdir_path] = {'name': d, 'files': 0, 'bytes': 0, 'tags': tags}
                else:
                    subdir_tag_value = d
                    tags = ["%s:%s" % (subdirtagname, subdir_tag_value)]
                    subdirs[subdir_path] = {'name': d, 'files': 0, 'bytes': 0, 'tags': tags}
            # There should only be one case where root == directory, so safe to break
            break

    # Walk the entire directory and accumulate counts
    for root, dirs, files in walk(directory):
        directory_files = 0
        subdir_bytes = 0
        for filename in files:
            filename = join(root, filename)

            # check if it passes our filter
            if not fnmatch(filename, pattern):
                continue
            directory_files += 1
            try:
                file_stat = stat(filename)
            except OSError as ose:
                self.warning("DirectoryCheck: could not stat file %s - %s" % (filename, ose))
            else:
                subdir_bytes += file_stat.st_size

        for subdir in subdirs:
            # Append a trailing slash to prevent bad matches
            if root == subdir or (recurse and root.startswith("{0}/".format(subdir))):
                subdirs[subdir]['files'] += directory_files
                subdirs[subdir]['bytes'] += subdir_bytes

    # Iterate through subdirectory states and emit metrics
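A stripped-down version of the same accounting, counting files and bytes per immediate subdirectory without the Datadog check machinery, might look like the sketch below. The directory in the usage comment is hypothetical, and files sitting directly in the root are not attributed to any subdirectory:

import os
from fnmatch import fnmatch

try:
    from scandir import walk
except ImportError:
    from os import walk

def subdir_stats(directory, pattern='*'):
    """Map each immediate subdirectory to recursive file/byte totals."""
    stats = {}
    for root, _dirs, files in walk(directory):
        rel = os.path.relpath(root, directory)
        if rel == '.':
            continue  # skip files directly under the root
        n_files = n_bytes = 0
        for name in files:
            path = os.path.join(root, name)
            if not fnmatch(path, pattern):
                continue
            n_files += 1
            try:
                n_bytes += os.stat(path).st_size
            except OSError:
                pass  # file vanished between listing and stat
        # Attribute this directory's totals to its top-level ancestor.
        top = os.path.join(directory, rel.split(os.sep)[0])
        entry = stats.setdefault(top, {'files': 0, 'bytes': 0})
        entry['files'] += n_files
        entry['bytes'] += n_bytes
    return stats

# print(subdir_stats('/var/log'))  # hypothetical directory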