This article collects typical usage examples of Python's glob.glob method. If you are wondering what glob.glob does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the glob module itself.
The following 15 code examples of glob.glob are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
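Before the project examples, here is a minimal, self-contained sketch of the most common glob.glob call patterns. The directory and file names below are purely illustrative and not taken from any of the projects quoted in this article:

import glob
import os

# Non-recursive: match all Python files directly in the current directory.
py_files = glob.glob("*.py")

# Recursive: "**" only descends into subdirectories when recursive=True (Python 3.5+).
log_files = glob.glob(os.path.join("logs", "**", "*.log"), recursive=True)

# glob.glob returns paths in arbitrary order, so sort when order matters.
for path in sorted(py_files + log_files):
    print(path)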
Example 1: scan
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def scan(self, path, exclude=[]) -> List[str]:
    """Scan path for matching files.

    :param path: the path to scan
    :param exclude: a list of directories to exclude
    :return: a list of sorted filenames
    """
    res = []
    path = path.rstrip("/").rstrip("\\")
    for pat in self.input_patterns:
        res.extend(glob.glob(path + os.sep + pat, recursive=True))
    res = list(filter(lambda p: os.path.isfile(p), res))
    if exclude:
        def excluded(path):
            for e in exclude:
                if path.startswith(e):
                    return True
            return False
        res = list(filter(lambda p: not excluded(p), res))
    return sorted(res)
Example 2: main
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def main(_):
    """Runs `text_utils.simplify_nq_example` over all shards of a split.

    Prints simplified examples to a single gzipped file in the same directory
    as the input shards.
    """
    split = os.path.basename(FLAGS.data_dir)
    outpath = os.path.join(FLAGS.data_dir,
                           "simplified-nq-{}.jsonl.gz".format(split))
    with gzip.open(outpath, "wb") as fout:
        num_processed = 0
        start = time.time()
        for inpath in glob.glob(os.path.join(FLAGS.data_dir, "nq-*-??.jsonl.gz")):
            print("Processing {}".format(inpath))
            with gzip.open(inpath, "rb") as fin:
                for l in fin:
                    utf8_in = l.decode("utf8", "strict")
                    utf8_out = json.dumps(
                        text_utils.simplify_nq_example(json.loads(utf8_in))) + u"\n"
                    fout.write(utf8_out.encode("utf8"))
                    num_processed += 1
                    if not num_processed % 100:
                        print("Processed {} examples in {}.".format(
                            num_processed, time.time() - start))
Example 3: find_previous
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def find_previous(self):
    sfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pth')
    sfiles = glob.glob(sfiles)
    sfiles.sort(key=os.path.getmtime)
    # Get the snapshot name in pytorch
    redfiles = []
    for stepsize in cfg.TRAIN.STEPSIZE:
        redfiles.append(os.path.join(self.output_dir,
                        cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_{:d}.pth'.format(stepsize + 1)))
    sfiles = [ss for ss in sfiles if ss not in redfiles]

    nfiles = os.path.join(self.output_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_*.pkl')
    nfiles = glob.glob(nfiles)
    nfiles.sort(key=os.path.getmtime)
    redfiles = [redfile.replace('.pth', '.pkl') for redfile in redfiles]
    nfiles = [nn for nn in nfiles if nn not in redfiles]

    lsf = len(sfiles)
    assert len(nfiles) == lsf

    return lsf, nfiles, sfiles
Developer: Sunarker, Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection, Lines of code: 23, Source: train_val.py
Example 4: main
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def main():
    opt, logger, vis = utils.build(is_train=False)
    dloader = data.get_data_loader(opt)
    print('Val dataset: {}'.format(len(dloader.dataset)))
    model = models.get_model(opt)

    for epoch in opt.which_epochs:
        # Load checkpoint
        if epoch == -1:
            # Find the latest checkpoint
            checkpoints = glob.glob(os.path.join(opt.ckpt_path, 'net*.pth'))
            assert len(checkpoints) > 0
            epochs = [int(filename.split('_')[-1][:-4]) for filename in checkpoints]
            epoch = max(epochs)
        logger.print('Loading checkpoints from {}, epoch {}'.format(opt.ckpt_path, epoch))
        model.load(opt.ckpt_path, epoch)

        results = evaluate(opt, dloader, model)
        for metric in results:
            logger.print('{}: {}'.format(metric, results[metric]))
Example 5: index
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def index(request):
    ctx = get_context(request)
    cname = os.environ["PORTAL_CNAME"]
    template_dir = get_app_template_dirs("templates/notebooks")[0]
    htmls = os.path.join(template_dir, cname, "*.html")
    ctx["notebooks"] = [
        p.split("/" + cname + "/")[-1].replace(".html", "") for p in glob(htmls)
    ]
    ctx["PORTAL_CNAME"] = cname
    ctx["landing_pages"] = []
    mask = ["project", "title", "authors", "is_public", "description", "urls"]
    client = Client(headers=get_consumer(request))  # sets/returns global variable
    entries = client.projects.get_entries(_fields=mask).result()["data"]
    for entry in entries:
        authors = entry["authors"].strip().split(",", 1)
        if len(authors) > 1:
            authors[1] = authors[1].strip()
        entry["authors"] = authors
        entry["description"] = entry["description"].split(".", 1)[0] + "."
        ctx["landing_pages"].append(
            entry
        )  # visibility governed by is_public flag and X-Consumer-Groups header
    return render(request, "home.html", ctx.flatten())
Example 6: create_celeba
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def create_celeba(tfrecord_dir, celeba_dir, cx=89, cy=121):
    print('Loading CelebA from "%s"' % celeba_dir)
    glob_pattern = os.path.join(celeba_dir, 'img_align_celeba_png', '*.png')
    image_filenames = sorted(glob.glob(glob_pattern))
    expected_images = 202599
    if len(image_filenames) != expected_images:
        error('Expected to find %d images' % expected_images)

    with TFRecordExporter(tfrecord_dir, len(image_filenames)) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            img = np.asarray(PIL.Image.open(image_filenames[order[idx]]))
            assert img.shape == (218, 178, 3)
            img = img[cy - 64 : cy + 64, cx - 64 : cx + 64]
            img = img.transpose(2, 0, 1)  # HWC => CHW
            tfr.add_image(img)

#----------------------------------------------------------------------------
Example 7: locate_result_subdir
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def locate_result_subdir(run_id_or_result_subdir):
    if isinstance(run_id_or_result_subdir, str) and os.path.isdir(run_id_or_result_subdir):
        return run_id_or_result_subdir

    searchdirs = []
    searchdirs += ['']
    searchdirs += ['results']
    searchdirs += ['networks']

    for searchdir in searchdirs:
        dir = config.result_dir if searchdir == '' else os.path.join(config.result_dir, searchdir)
        dir = os.path.join(dir, str(run_id_or_result_subdir))
        if os.path.isdir(dir):
            return dir
        prefix = '%03d' % run_id_or_result_subdir if isinstance(run_id_or_result_subdir, int) else str(run_id_or_result_subdir)
        dirs = sorted(glob.glob(os.path.join(config.result_dir, searchdir, prefix + '-*')))
        dirs = [dir for dir in dirs if os.path.isdir(dir)]
        if len(dirs) == 1:
            return dirs[0]

    raise IOError('Cannot locate result subdir for run', run_id_or_result_subdir)
Example 8: data_stat
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def data_stat():
    """data statistic"""
    audio_path = './data/esc10/audio/'
    class_list = [os.path.basename(i) for i in glob(audio_path + '*')]
    nums_each_class = [len(glob(audio_path + cl + '/*.ogg')) for cl in class_list]

    rects = plt.bar(range(len(nums_each_class)), nums_each_class)
    index = list(range(len(nums_each_class)))
    plt.title('Numbers of each class for ESC-10 dataset')
    plt.ylim(ymax=60, ymin=0)
    plt.xticks(index, class_list, rotation=45)
    plt.ylabel("numbers")
    for rect in rects:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width() / 2, height, str(height), ha='center', va='bottom')
    plt.tight_layout()
    plt.show()
Example 9: get_cifar10
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def get_cifar10(data_dir):
    if not os.path.isdir(data_dir):
        os.system("mkdir " + data_dir)
    cwd = os.path.abspath(os.getcwd())
    os.chdir(data_dir)
    if (not os.path.exists('train.rec')) or \
       (not os.path.exists('test.rec')):
        import urllib, zipfile, glob
        dirname = os.getcwd()
        zippath = os.path.join(dirname, "cifar10.zip")
        urllib.urlretrieve("http://data.mxnet.io/mxnet/data/cifar10.zip", zippath)
        zf = zipfile.ZipFile(zippath, "r")
        zf.extractall()
        zf.close()
        os.remove(zippath)
        for f in glob.glob(os.path.join(dirname, "cifar", "*")):
            name = f.split(os.path.sep)[-1]
            os.rename(f, os.path.join(dirname, name))
        os.rmdir(os.path.join(dirname, "cifar"))
    os.chdir(cwd)

# data
Example 10: test_completeness
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def test_completeness(self):
    """
    Make sure that every tutorial that isn't in the whitelist is considered for testing by this
    file. Exceptions should be added to the whitelist.
    N.B. If the test is commented out, then that will be viewed as an intentional disabling of the
    test.
    """
    # Open up this test file.
    with open(__file__, 'r') as f:
        notebook_test_text = '\n'.join(f.readlines())

    notebooks_path = os.path.join(os.path.dirname(__file__), 'straight_dope_book')
    notebooks = glob.glob(os.path.join(notebooks_path, '**', '*.ipynb'))

    # Compile a list of notebooks that are tested
    tested_notebooks = set(re.findall(r"assert _test_notebook\('(.*)'\)", notebook_test_text))

    # Ensure each notebook in the straight dope book directory is on the whitelist or is tested.
    for notebook in notebooks:
        friendly_name = '/'.join(notebook.split('/')[-2:]).split('.')[0]
        if friendly_name not in tested_notebooks and friendly_name not in NOTEBOOKS_WHITELIST:
            assert False, friendly_name + " has not been added to the nightly/tests/straight_" + \
                          "dope/test_notebooks_single_gpu.py test_suite. Consider also adding " + \
                          "it to nightly/tests/straight_dope/test_notebooks_multi_gpu.py as " + \
                          "well if the notebooks makes use of multiple GPUs."
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 27, Source: test_notebooks_single_gpu.py
Example 11: test_tutorial_downloadable
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def test_tutorial_downloadable():
    """
    Make sure that every tutorial that isn't in the whitelist has the placeholder
    that enables notebook download
    """
    download_button_string = '<!-- INSERT SOURCE DOWNLOAD BUTTONS -->'

    tutorial_path = os.path.join(os.path.dirname(__file__), '..', '..', 'docs', 'tutorials')
    tutorials = glob.glob(os.path.join(tutorial_path, '**', '*.md'))

    for tutorial in tutorials:
        with open(tutorial, 'r') as file:
            lines = file.readlines()
        last = lines[-1]
        second_last = lines[-2]
        downloadable = download_button_string in last or download_button_string in second_last
        friendly_name = '/'.join(tutorial.split('/')[-2:])
        if not downloadable and friendly_name not in whitelist_set:
            print(last, second_last)
            assert False, "{} is missing <!-- INSERT SOURCE DOWNLOAD BUTTONS --> as its last line".format(friendly_name)
Example 12: test_multiprocessing_download_successful
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def test_multiprocessing_download_successful():
    """ test download with multiprocessing """
    tmp = tempfile.mkdtemp()
    tmpfile = os.path.join(tmp, 'README.md')
    process_list = []
    # test it with 10 processes
    for i in range(10):
        process_list.append(mp.Process(
            target=_download_successful, args=(tmpfile,)))
        process_list[i].start()
    for i in range(10):
        process_list[i].join()
    assert os.path.getsize(tmpfile) > 100, os.path.getsize(tmpfile)
    # check only one file we want left
    pattern = os.path.join(tmp, 'README.md*')
    assert len(glob.glob(pattern)) == 1, glob.glob(pattern)
    # delete temp dir
    shutil.rmtree(tmp)
Example 13: _get_random_object
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def _get_random_object(self, num_objects, test):
    """Randomly choose an object urdf from the random_urdfs directory.

    Args:
      num_objects:
        Number of graspable objects.

    Returns:
      A list of urdf filenames.
    """
    if test:
        urdf_pattern = os.path.join(self._urdfRoot, 'random_urdfs/*0/*.urdf')
    else:
        urdf_pattern = os.path.join(self._urdfRoot, 'random_urdfs/*[^0]/*.urdf')
    found_object_directories = glob.glob(urdf_pattern)
    total_num_objects = len(found_object_directories)
    selected_objects = np.random.choice(np.arange(total_num_objects),
                                        num_objects)
    selected_objects_filenames = []
    for object_index in selected_objects:
        selected_objects_filenames += [found_object_directories[object_index]]
    return selected_objects_filenames
Example 14: main
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def main():
    quiet = len(sys.argv) >= 2 and sys.argv[1] == '-q'

    file_names = glob.glob('openfermion-cirq/**/*.py', recursive=True)

    failed, attempted = run_tests(file_names,
                                  include_modules=True,
                                  include_local=False,
                                  quiet=quiet)

    if failed != 0:
        print(
            shell_tools.highlight(
                f'Failed: {failed} failed, '
                f'{attempted - failed} passed, {attempted} total',
                shell_tools.RED))
        sys.exit(1)
    else:
        print(shell_tools.highlight(f'Passed: {attempted}', shell_tools.GREEN))
        sys.exit(0)
Example 15: copy_dependencies
# Required import: import glob [as alias]
# Or: from glob import glob [as alias]
def copy_dependencies(f):
    config_path = '/etc/yangcatalog/yangcatalog.conf'
    config = ConfigParser.ConfigParser()
    config._interpolation = ConfigParser.ExtendedInterpolation()
    config.read(config_path)
    yang_models = config.get('Directory-Section', 'save-file-dir')
    tmp = config.get('Directory-Section', 'temp')
    out = f.getvalue()
    letters = string.ascii_letters
    suffix = ''.join(random.choice(letters) for i in range(8))
    dep_dir = '{}/yangvalidator-dependencies-{}'.format(tmp, suffix)
    os.mkdir(dep_dir)
    dependencies = out.split(':')[1].strip().split(' ')
    for dep in dependencies:
        for file in glob.glob(r'{}/{}*.yang'.format(yang_models, dep)):
            shutil.copy(file, dep_dir)
    return dep_dir