本文整理汇总了Python中os.path.basename函数的典型用法代码示例。如果您正苦于以下问题:Python basename函数的具体用法?Python basename怎么用?Python basename使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了basename函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _find_in_most_specific_archive
def _find_in_most_specific_archive(self, path):
    """Locate *path* inside the most specific enclosing archive.

    Walks upward from the deepest directory of *path*, treating each
    ancestor directory as a candidate archive file under
    ``self.data_root``.  Opened archives are cached in
    ``self.open_archives``.  Returns an open file-like object for the
    member, or raises IOError when no archive contains it.
    """
    member = basename(path)
    parent = dirname(path)
    while parent:
        candidate = join(self.data_root, parent + archive_extension)
        # Open each readable archive once and cache the handle.
        if candidate not in self.open_archives and access(candidate, R_OK):
            self.open_archives[candidate] = ZipFile(candidate, "r")
        if candidate in self.open_archives:
            # NOTE: open() is ZipFile-specific; a different accessor would
            # be needed if the archive type changed (e.g. tar).
            return self.open_archives[candidate].open(member)
        # Not found at this level: fold this directory component into the
        # member path and retry one level up.
        member = join(basename(parent), member)
        parent = dirname(parent)
    raise IOError(
        "File not found (%s), not even in archive; data_root is %s" % \
        (path, self.data_root)
    )
示例2: cpMCNPproject
def cpMCNPproject(directory):
    """Copy the card and geometry ``.part`` files of the MCNP project in
    *directory* into the current working directory.

    Returns 0 on success, 1 when *directory* is not an MCNP project, and
    2 when the current working directory is not an MCNP project.
    Individual copy failures are reported but do not abort the run.
    """
    wkdir = getcwd()
    if checkifMCNPproject(directory, 1) == 1:
        return 1
    elif checkifMCNPproject(wkdir, 2) == 2:
        return 2
    # The original had two near-identical copy loops for "cards" and
    # "geom"; drive both from one table instead.
    # NOTE(review): "traslations.part" looks like a typo for
    # "translations.part" — confirm against the project layout before
    # renaming, since it is a real file name.
    parts = {
        "cards": ["parameters.part", "materials.part", "source.part",
                  "tallies.part", "traslations.part"],
        "geom": ["cells.part", "surfaces.part"],
    }
    for subdir, names in parts.items():
        for name in names:
            src = path.join(directory, subdir, name)
            try:
                copyfile(src, path.join(wkdir, subdir, path.basename(src)))
            except Exception as e:
                # print() with a single argument is valid on both
                # Python 2 and 3 (the original used a py2-only print
                # statement).
                print("\n\033[1;34mMCNPmanager cp error:\033[1;32m %s \033[0m\n" % (e))
    return 0
示例3: vars2png
def vars2png(self, wrfout_path, dom_id, ts_esmf, vars):
    """
    Postprocess a list of scalar fields into PNG files.

    :param wrfout_path: WRF file to process
    :param dom_id: the domain identifier
    :param ts_esmf: time stamp in ESMF format
    :param vars: list of variables to process
    :raises PostprocError: if ts_esmf is not present in the file
    """
    # open the netCDF dataset
    d = nc4.Dataset(wrfout_path)
    try:
        # extract ESMF string times and identify timestamp of interest
        times = [''.join(x) for x in d.variables['Times'][:]]
        if ts_esmf not in times:
            raise PostprocError("Invalid timestamp %s" % ts_esmf)
        tndx = times.index(ts_esmf)
        # build one PNG (plus manifest entry) per variable
        for var in vars:
            try:
                outpath_base = os.path.join(self.output_path, self.product_name + ("-%02d-" % dom_id) + ts_esmf + "-" + var)
                if var in ['WINDVEC']:
                    raster_path, coords = self._vector2png(d, var, tndx, outpath_base)
                    raster_name = osp.basename(raster_path)
                    self._update_manifest(dom_id, ts_esmf, var, {'raster': raster_name, 'coords': coords})
                else:
                    raster_path, cb_path, coords = self._scalar2png(d, var, tndx, outpath_base)
                    mf_upd = {'raster': osp.basename(raster_path), 'coords': coords}
                    if cb_path is not None:
                        mf_upd['colorbar'] = osp.basename(cb_path)
                    self._update_manifest(dom_id, ts_esmf, var, mf_upd)
            except Exception as e:
                # BUG FIX: Exception.message does not exist in Python 3,
                # and traceback.print_exc() returns None (so the original
                # logged the literal "None"); use str(e) and format_exc().
                logging.warning("Exception %s while postprocessing %s for time %s into PNG" % (e, var, ts_esmf))
                logging.warning(traceback.format_exc())
    finally:
        # BUG FIX: the dataset handle was never closed; release it even
        # when PostprocError is raised.
        d.close()
示例4: create_tag
def create_tag(self):
    """Build a '<file>:<function>' tag for the currently running test.

    Walks up the call stack past frames that belong to the test
    plumbing (environment.py, nfs4client.py) and names the first frame
    beyond them.
    """
    frames = inspect.stack()
    # Idiomatic membership test replaces the original's duplicated
    # basename() comparisons joined with 'or'.
    plumbing = ('environment.py', 'nfs4client.py')
    stackid = 0
    while basename(frames[stackid][1]) in plumbing:
        stackid += 1
    return '%s:%s' % (basename(frames[stackid][1]), frames[stackid][3])
示例5: compile_timestamped_transcript_files
def compile_timestamped_transcript_files(json_filenames):
    """
    `json_filenames` is a list of filepaths with this filename format:
        00900-01000.json
    where the left number is the starting time offset and the right
    number the ending time, in seconds.  Each file follows the Watson
    API standard JSON response format.

    Returns: a dictionary that is the result of concatenating all the
    json files into one, with "results" pointing to a list of all
    returned responses.  To maintain compatibility with Watson's API
    response, a "result_index" key is included and set to 0, i.e. as if
    the resulting dictionary were the response for one unbroken
    soundstream sent to Watson.
    """
    compiled_results = []
    compiled_dict = {'results': compiled_results, "result_index": 0}
    # sort chunks by their starting offset (the number before the dash)
    filenames = sorted(json_filenames, key=lambda x: int(basename(x).split('-')[0]))
    for fn in filenames:
        start_offset_sec = int(basename(fn).split('-')[0])
        with open(fn) as f:
            data = json.load(f)
        for result in data['results']:
            # BUG FIX: .get() without a default returns None when the key
            # is absent, crashing the for-loop; default to [] instead.
            for x in result.get('word_alternatives', []):
                x['start_time'] += start_offset_sec
                x['end_time'] += start_offset_sec
            for alt in result.get('alternatives', []):
                for ts in alt['timestamps']:
                    # each timestamp object is a list: ["hi", 9.93, 10.11]
                    ts[1] += start_offset_sec
                    ts[2] += start_offset_sec
            compiled_results.append(result)
    return compiled_dict
示例6: copy_static_entry
def copy_static_entry(source, targetdir, builder, context=None,
                      exclude_matchers=(), level=0):
    """Copy a HTML builder static_path entry from source to targetdir.

    Handles all possible cases of files, directories and subdirectories.

    :param source: path of the file or directory to copy
    :param targetdir: destination directory
    :param builder: HTML builder (supplies ``srcdir`` and ``templates``)
    :param context: template rendering context for ``*_t`` files
    :param exclude_matchers: callables tested against the source path
        relative to ``builder.srcdir``; any match skips the entry
    :param level: recursion depth; directories below level 0 are copied
        wholesale with copytree
    """
    # BUG FIX: a mutable default argument ({}) is shared across all
    # calls; use None as the default and allocate per call.
    if context is None:
        context = {}
    if exclude_matchers:
        relpath = relative_path(builder.srcdir, source)
        for matcher in exclude_matchers:
            if matcher(relpath):
                return
    if path.isfile(source):
        target = path.join(targetdir, path.basename(source))
        if source.lower().endswith('_t') and builder.templates:
            # templated: render through the template engine and drop the
            # trailing '_t' from the target name.  `with` guarantees both
            # handles are closed even if rendering raises (the original
            # leaked them on error).
            with open(source, 'r', encoding='utf-8') as fsrc:
                with open(target[:-2], 'w', encoding='utf-8') as fdst:
                    fdst.write(builder.templates.render_string(fsrc.read(), context))
        else:
            copyfile(source, target)
    elif path.isdir(source):
        if level == 0:
            for entry in os.listdir(source):
                if entry.startswith('.'):
                    continue
                copy_static_entry(path.join(source, entry), targetdir,
                                  builder, context, level=1,
                                  exclude_matchers=exclude_matchers)
        else:
            target = path.join(targetdir, path.basename(source))
            if path.exists(target):
                shutil.rmtree(target)
            shutil.copytree(source, target)
示例7: __init__
def __init__(self, current_file_path=''):
    """
    FileDialog constructor.

    Args:
        current_file_path: the current directory or path to the open flow graph
    """
    if not current_file_path:
        current_file_path = path.join(DEFAULT_FILE_PATH, NEW_FLOGRAPH_TITLE + Preferences.file_extension())
    # configure the underlying chooser according to the dialog type
    if self.type == OPEN_FLOW_GRAPH:
        FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_OPEN, 'Open a Flow Graph from a File...')
        self.add_and_set_filter(get_flow_graph_files_filter())
        self.set_select_multiple(True)
    elif self.type == SAVE_FLOW_GRAPH:
        FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_SAVE, 'Save a Flow Graph to a File...')
        self.add_and_set_filter(get_flow_graph_files_filter())
        self.set_current_name(path.basename(current_file_path))
    elif self.type == SAVE_CONSOLE:
        FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_SAVE, 'Save Console to a File...')
        self.add_and_set_filter(get_text_files_filter())
        # pre-fill with the current filename, extension stripped
        stem = path.splitext(path.basename(current_file_path))[0]
        self.set_current_name(stem)
    elif self.type == SAVE_IMAGE:
        FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_SAVE, 'Save a Flow Graph Screen Shot...')
        self.add_and_set_filter(get_image_files_filter())
        # pre-fill with the current filename plus the image extension
        current_file_path = current_file_path + IMAGE_FILE_EXTENSION
        self.set_current_name(path.basename(current_file_path))
    elif self.type == OPEN_QSS_THEME:
        FileDialogHelper.__init__(self, gtk.FILE_CHOOSER_ACTION_OPEN, 'Open a QSS theme...')
        self.add_and_set_filter(get_qss_themes_filter())
        self.set_select_multiple(False)
    # start browsing in the directory of the current file
    self.set_current_folder(path.dirname(current_file_path))
示例8: generate
def generate(self):
    """Generate the CoIDE ``.coproj`` project file for the current target.

    Collects source files, headers, libraries and linker settings from
    ``self.resources`` and renders the per-target coproj template.
    """
    self.resources.win_to_unix()
    # map each resource path to the coproj entry type expected by CoIDE
    source_files = []
    # items() works on both py2 and py3; iteritems() was py2-only
    for r_type, n in CoIDE.FILE_TYPES.items():
        # 'src'/'hdr' avoid shadowing the py2 builtin 'file'
        for src in getattr(self.resources, r_type):
            source_files.append({"name": basename(src), "type": n, "path": src})
    header_files = []
    for r_type, n in CoIDE.FILE_TYPES2.items():
        for hdr in getattr(self.resources, r_type):
            header_files.append({"name": basename(hdr), "type": n, "path": hdr})
    libraries = []
    for lib in self.resources.libraries:
        stem, _ = splitext(basename(lib))
        # strip the 'lib' prefix, e.g. 'libfoo.a' -> 'foo'
        libraries.append(stem[3:])
    if self.resources.linker_script is None:
        self.resources.linker_script = ""
    ctx = {
        "name": self.program_name,
        "source_files": source_files,
        "header_files": header_files,
        "include_paths": self.resources.inc_dirs,
        "scatter_file": self.resources.linker_script,
        "library_paths": self.resources.lib_dirs,
        "object_files": self.resources.objects,
        "libraries": libraries,
        "symbols": self.get_symbols(),
    }
    target = self.target.lower()
    # Project file
    self.gen_file("coide_%s.coproj.tmpl" % target, ctx, "%s.coproj" % self.program_name)
示例9: test_compare_triples
def test_compare_triples():
    """Assert the FDP's live triples match the reference dumps.

    For every supported MIME type and URL, parse both the live response
    and the corresponding dump file, then require that the live graph
    contains no triples absent from the reference.
    """
    for mime, fext in MIME_TYPES.items():
        dump_path = path.join(DUMP_DIR, path.basename(mime))
        for url in URLs:
            # urlparse moved modules between python 2 and 3
            if six.PY2:
                url_path = urlparse.urlparse(url).path
            else:
                url_path = urlparse(url).path
            fname = path.join(dump_path, '%s.%s' % (path.basename(url_path), fext))
            request = Request(url)
            request.add_header('Accept', mime)
            response = urlopen(request)
            g_fdp.parse(data=response.read(), format=mime)
            g_dump.parse(fname, format=mime)
            both, first, second = graph_diff(g_fdp, g_dump)
            n_first = len(first)
            assert_equals(
                n_first, 0, '{} triple(s) different from reference:\n\n{}===\n{}\n'.format(
                    n_first, first.serialize(format='turtle'), second.serialize(format='turtle')))
示例10: test_validate_demux_file_infer
def test_validate_demux_file_infer(self):
    """Demux validation should succeed and keep only samples present in the file."""
    demux_fp, _, out_dir = self._generate_files({'s1': 'SKB2.640194',
                                                 's2': 'SKM4.640180'})
    prep_info = {"1.SKB2.640194": {"not_a_run_prefix": "s1"},
                 "1.SKM4.640180": {"not_a_run_prefix": "s2"},
                 "1.SKB3.640195": {"not_a_run_prefix": "s3"},
                 "1.SKB6.640176": {"not_a_run_prefix": "s4"}}
    files = {'preprocessed_demux': [demux_fp]}
    job_id = self._create_template_and_job(prep_info, files, "Demultiplexed")
    obs_success, obs_ainfo, obs_error = _validate_demux_file(
        self.qclient, job_id, prep_info, out_dir, demux_fp)
    self.assertTrue(obs_success)
    # fastq/fasta names are derived from the demux file's basename
    stem = splitext(basename(demux_fp))[0]
    exp_demux_fp = join(out_dir, basename(demux_fp))
    expected_paths = [
        (join(out_dir, "%s.fastq" % stem), 'preprocessed_fastq'),
        (join(out_dir, "%s.fasta" % stem), 'preprocessed_fasta'),
        (exp_demux_fp, 'preprocessed_demux')]
    self.assertEqual(obs_ainfo,
                     [ArtifactInfo(None, "Demultiplexed", expected_paths)])
    self.assertEqual(obs_error, "")
    # only the two samples present in the demux file survive validation
    with File(exp_demux_fp) as f:
        self.assertItemsEqual(f.keys(), ["1.SKB2.640194", "1.SKM4.640180"])
示例11: rename_file
def rename_file(self, fname):
    """Rename file"""
    # prompt for the new name, pre-filled with the current one
    new_name, valid = QInputDialog.getText(self, _("Rename"), _("New name:"),
                                           QLineEdit.Normal, osp.basename(fname))
    if not valid:
        return
    target = osp.join(osp.dirname(fname), to_text_string(new_name))
    if target == fname:
        return
    if osp.exists(target):
        # confirm before clobbering an existing file
        answer = QMessageBox.warning(
            self,
            _("Rename"),
            _("Do you really want to rename <b>%s</b> and " "overwrite the existing file <b>%s</b>?")
            % (osp.basename(fname), osp.basename(target)),
            QMessageBox.Yes | QMessageBox.No,
        )
        if answer == QMessageBox.No:
            return
    try:
        misc.rename_file(fname, target)
        self.parent_widget.renamed.emit(fname, target)
        return target
    except EnvironmentError as error:
        QMessageBox.critical(
            self,
            _("Rename"),
            _("<b>Unable to rename file <i>%s</i></b>" "<br><br>Error message:<br>%s")
            % (osp.basename(fname), to_text_string(error)),
        )
示例12: _load_rbo
def _load_rbo(self):
    """Load APC2015rbo dataset"""
    dataset_dir = osp.join(this_dir, 'dataset/APC2015rbo/berlin_samples')
    img_glob = osp.join(dataset_dir, '*_bin_[A-L].jpg')
    for img_file in tqdm.tqdm(glob.glob(img_glob), ncols=80, desc='rbo'):
        stem = osp.splitext(osp.basename(img_file))[0]
        # derive the bin-mask path and compute the crop ROI from the
        # nonzero extent of the mask
        bin_mask_file = re.sub('.jpg$', '.pbm', img_file)
        bin_mask = imread(bin_mask_file, mode='L')
        where = np.argwhere(bin_mask)
        roi = where.min(0), where.max(0) + 1
        id_ = osp.join('rbo', stem)
        # NOTE(review): this index is computed BEFORE id_ is appended to
        # self.ids below, so it points at the previous entry; this looks
        # like an off-by-one — confirm the intended indexing before
        # changing it.
        dataset_index = len(self.ids) - 1
        self.datasets['rbo'].append(dataset_index)
        # collect per-object masks, slotted by label index
        mask_glob = re.sub('.jpg$', '_*.pbm', img_file)
        mask_files = [None] * self.n_class
        for mask_file in glob.glob(mask_glob):
            mask_stem = osp.splitext(osp.basename(mask_file))[0]
            label_name = re.sub(stem + '_', '', mask_stem)
            if label_name == 'shelf':
                continue
            mask_files[self.target_names.index(label_name)] = mask_file
        self.ids.append(id_)
        self.rois.append(roi)
        self.img_files.append(img_file)
        self.mask_files.append(mask_files)
示例13: append_deps_rootpath
def append_deps_rootpath(dep_modules_roots, search_depth=10):
    """
    Append all paths described in PackageInfo.dep_modules_roots into sys.path,
    so any module in the package can be called as a main entry, with
    successfully importing dependent modules of dependent packages via:
    'import <identifiable_package_root>.<sub>.<target_module>'.

    Walks up to *search_depth* ancestor directories of this file, looking
    for directory names that match entries of *dep_modules_roots*, and
    appends each match to sys.path.

    Be sure this function is called from the main entry script with all
    outer dependent package names in *dep_modules_roots*.

    Returns the list of names that were found and appended, or None when
    nothing matched.
    """
    check_path = ospath.dirname(ospath.abspath(__file__))
    dep_dirs = []
    dep_remains = list(dep_modules_roots)
    for _ in range(search_depth):
        check_path = ospath.dirname(check_path)
        check_name = ospath.basename(check_path)
        # BUG FIX: the original removed items from dep_remains while
        # iterating over it, which skips the element following each
        # removal; iterate over a snapshot instead.
        for dep_name in list(dep_remains):
            if dep_name == check_name:
                dep_dirs.append(check_path)
                dep_remains.remove(dep_name)
        if not dep_remains:
            break
    if not dep_dirs:
        return None
    for dep_dir in dep_dirs:
        sys.path.append(dep_dir)
        print(BColors.BLUE
              + "Append path of package:'{pkg}'".format(pkg=ospath.basename(dep_dir))
              + " for package:'{name}' to sys.path as dependent modules root."
              .format(name=PackageInfo.package_name)
              + BColors.ENDC)
    return [name for name in dep_modules_roots if name not in dep_remains]
示例14: __init__
def __init__(self, images, delivery_types=None):
    """
    Parameters
    ----------
    images : iterable (list, tuple, etc)
        A sequence of paths to the image files.
    delivery_types : iterable, None
        If None (default), the image paths names must follow the `Naming Convention <http://pylinac.readthedocs.org/en/latest/vmat_docs.html#naming-convention>`_.
        If the image paths do not follow the naming convention, a 2-element string sequence for ``delivery_types`` must be passed in. E.g. ``['open', 'dmlc']``.
    """
    self.settings = Settings('', 1.5)
    # error checks
    if len(images) != 2:
        raise ValueError("Exactly 2 images (open, DMLC) must be passed")
    if delivery_types and len(delivery_types) != 2:
        raise ValueError("Delivery types must be 2 elements long")
    if delivery_types is None:
        delivery_types = []
    # classify each image as the open or DMLC field, by file name or by
    # the explicitly supplied delivery type
    for img, deliv in zip_longest(images, delivery_types, fillvalue=''):
        img_lower = img.lower()
        if OPEN in img_lower or OPEN == deliv.lower():
            self.image_open = image.load(img)
        elif DMLC in img_lower or DMLC == deliv.lower():
            self.image_dmlc = image.load(img)
        else:
            raise ValueError("Image file names must follow the naming convention (e.g. 'DRGS_open.dcm'), or the delivery types must be passed explicitly")
    # try to determine the test type from the file names
    name_list = [osp.basename(img).lower() for img in images]
    if all(DRGS in name for name in name_list):
        self.settings.test_type = DRGS
    elif all(DRMLC in name for name in name_list):
        self.settings.test_type = DRMLC
示例15: _get_matfile_data
def _get_matfile_data():
    """Load Arielle's reference data from the bundled .mat file.

    Returns a ``(check_nvox, arielle_runs)`` pair: a dict mapping ROI
    name to its voxel count, and a per-run list of dicts mapping ROI
    name to its time series.
    """
    mat_file = osp.join(osp.dirname(osp.realpath(__file__)),
                        "sub01_session_1_raw_ROI_timeseries.mat")
    # loaded keys: ['all_rois', 'time_series', '__globals__', 'Nvox',
    #               '__header__', '__version__']
    to_check = sio.loadmat(mat_file)
    nvox = to_check['Nvox'][0]
    # time_series has shape (time, rois, nb_runs)
    nb_runs = to_check['time_series'].shape[2]
    assert nb_runs == 4
    rois = to_check['all_rois']
    # voxel count per ROI, keyed by the ROI file's basename sans extension
    check_nvox = {}
    for idx, roi in enumerate(rois):
        roi_name = osp.splitext(osp.basename(roi[0][0]))[0]
        check_nvox[roi_name] = nvox[idx]
    # per-run signal dicts, keyed the same way
    arielle_runs = []
    for run in range(nb_runs):
        signals = {}
        for idx, roi in enumerate(rois):
            roi_name = osp.splitext(osp.basename(roi[0][0]))[0]
            signals[roi_name] = to_check['time_series'][:, idx, run]
        arielle_runs.append(signals)
    return check_nvox, arielle_runs