This page collects typical usage examples of the Python method moments.path.Path.parent. If you are wondering what Path.parent does, how to call it, or what its usage looks like in practice, the curated code examples below may help. You can also explore further examples of the containing class, moments.path.Path.
The 15 code examples of Path.parent shown below are sorted by popularity by default.
Example 1: main
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def main():
    if len(sys.argv) > 1:
        helps = ['--help', 'help', '-h']
        for i in helps:
            if i in sys.argv:
                usage()
                exit()

        f1 = sys.argv[1]
        if len(sys.argv) > 2:
            f2 = sys.argv[2]
        else:
            f1_path = Path(f1)
            f1_dir = f1_path.parent()
            f2 = os.path.join(str(f1_dir), "summary.txt")
            f2_path = Path(f2)
            if not f2_path.exists():
                print("Saving output to: %s" % f2)
            else:
                print("Warning: %s exists!" % f2)
                exit()

        create_summary(f1, f2)
    else:
        usage()
        exit()
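The default-output logic above can be reduced to a short sketch; the input path below is hypothetical, and only the parent()/os.path.join combination is taken from the example:

from moments.path import Path
import os

f1_path = Path("/data/notes.txt")              # hypothetical input file
f1_dir = f1_path.parent()                      # Path of the containing directory
f2 = os.path.join(str(f1_dir), "summary.txt")  # sibling file next to the input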
Example 2: add_new
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def add_new(source_list, source_dir, destination=None):
    #ignores = ["classics", "misc", "other", "youtube-dl", "playlists"]
    ignores = ["playlists"]
    m3u = M3U(source_list)
    if os.path.isdir(source_dir):
        source_dir_path = Path(source_dir)
        subdirs = source_dir_path.load().directories
        #subdirs = os.listdir(source_dir)
        for subdir in subdirs:
            print("")
            if check_ignore(str(subdir), ignores):
                print("SKIP (IGNORES): %s" % subdir)
            else:
                print("SUBDIR: %s" % subdir)
                scan_dir(m3u, subdir)
        scan_dir(m3u, source_dir)
    else:
        print("NOT A DIRECTORY: %s" % source_dir)

    print("")
    print("")
    #for item in m3u:
    #    print item
    if destination is None:
        source_list_path = Path(source_list)
        dest_name = Timestamp().compact(accuracy="day") + "-videos.m3u"
        destination = os.path.join(str(source_list_path.parent()), dest_name)

    print("Saving to: %s" % destination)
    m3u.save(destination)
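A minimal sketch of the default-destination naming used above; the playlist path is hypothetical and the Timestamp import location is an assumption, not taken from the example:

from moments.path import Path
from moments.timestamp import Timestamp   # assumed module location for Timestamp
import os

source_list = "/playlists/videos.m3u"      # hypothetical playlist
dest_name = Timestamp().compact(accuracy="day") + "-videos.m3u"
destination = os.path.join(str(Path(source_list).parent()), dest_name)
# e.g. ".../playlists/20240101-videos.m3u"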
Example 3: copy_up
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def copy_up(relative=''):
    """
    find the item at the supplied path
    and copy it up to the parent directory

    this is useful for images that should show up as the default image
    """
    global path_root
    if re.match('~', relative):
        relative = os.path.expanduser(relative)
    full_path = os.path.join(path_root, relative)
    path = Path(full_path, relative_prefix=path_root)
    if path.type() == "Image":
        cur_dir = path.parent()
        parent = cur_dir.parent()
        path.copy(parent)
        #this should be sufficient
        return "Success!"
    else:
        return "Failed"
Example 4: find_jsons
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def find_jsons(item, limit_by_name=False, debug=False):
    """
    foundation for find_json
    but this returns all matches (based on parameters)
    """
    if re.search(r'.*\.json$', item):
        if debug:
            #print "find_and_load_json: item is a json string: %s" % item
            logging.debug("find_json: item is a json string: %s" % item)
        return [item]
    else:
        parent = ''
        name = ''
        p = Path(item)
        if p.type() == "Directory":
            #item must be a directory... just look here
            parent = p
            d = p.load()
        else:
            name = to_tag(p.name)
            #must be some other file type... load the parent directory:
            parent = p.parent()
            d = parent.load()
            if debug:
                print("%s not a directory, using: %s" % (item, parent))

        matches = []
        for j in d.files:
            #if debug:
            #    print "Checking: %s" % j
            if re.search(r'\.json$', str(j)):
                if debug:
                    print("matched json: %s" % j)
                match = os.path.join(str(parent), str(j))
                #this should allow us to hone in on one
                #if there is more than one media file in a directory
                if name and limit_by_name:
                    if re.search(name, str(j)):
                        matches.append(match)
                    else:
                        if debug:
                            print("could not find %s in %s" % (name, str(j)))
                else:
                    matches.append(match)

        if debug:
            print("Found the following: %s" % matches)
        return matches
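The directory-scanning part of find_jsons() boils down to the following sketch; the media path is hypothetical, and d.files is assumed to yield names relative to the directory, as the example's os.path.join usage suggests:

from moments.path import Path
import os, re

p = Path("/media/clips/clip1.mp4")    # hypothetical non-json file
parent = p.parent()
d = parent.load()                     # directory object listing siblings
jsons = [os.path.join(str(parent), str(f))
         for f in d.files if re.search(r'\.json$', str(f))]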
Example 5: copy_media
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def copy_media(source, source_root, destination_root):
    m3u = M3U(source)
    total_size = 0
    total_items = 0
    for item in m3u:
        if re.match(source_root, item):
            p = Path(item)
            relative = p.to_relative(source_root)
            sparent = p.parent()
            destination = os.path.join(destination_root, relative)
            dpath = Path(destination)
            dparent = dpath.parent()
            print(relative)
            print(sparent)
            print(destination)
            if not os.path.exists(str(dparent)):
                os.makedirs(str(dparent))
            if not os.path.exists(destination):
                p.copy(destination)
            else:
                print("already have: %s" % destination)

            for option in os.listdir(str(sparent)):
                soption = os.path.join(str(sparent), option)
                spath = Path(soption)
                print(spath.type())
                if spath.type() != "Movie" and spath.type() != "Directory":
                    doption = os.path.join(str(dparent), option)
                    if not os.path.exists(doption):
                        print("copy here: %s, to %s" % (soption, doption))
                        shutil.copy(soption, doption)
        print()
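The path bookkeeping in copy_media() follows this shape; the roots and item below are hypothetical, and the exact string returned by to_relative() (with or without a leading slash) depends on the library:

from moments.path import Path
import os

source_root = "/mnt/source"
destination_root = "/mnt/backup"
p = Path("/mnt/source/shows/episode1.mp4")   # hypothetical playlist item
relative = p.to_relative(source_root)        # e.g. "shows/episode1.mp4"
destination = os.path.join(destination_root, relative)
dparent = Path(destination).parent()         # directory to create before copying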
Example 6: find_zips
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def find_zips(item):
    p = Path(item)
    if p.type() == "Directory":
        root = item
    else:
        parent = p.parent()
        root = str(parent)

    matches = []
    options = os.listdir(root)
    for o in options:
        if re.search(r'.*\.zip$', o):
            zipf = os.path.join(root, o)
            matches.append(zipf)
    return matches
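Hypothetical calls; because of the parent() fallback, passing a file inside the directory should give the same result as passing the directory itself:

find_zips("/downloads")               # search the directory directly
find_zips("/downloads/readme.txt")    # any file: its parent directory is searched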
Example 7: series
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def series(type="Image", relative=''):
    """
    show the current item in a series
    along with links to previous and next
    """
    global path_root
    if re.match('~', relative):
        relative = os.path.expanduser(relative)
    if not re.match('/', relative):
        relative = os.path.join(path_root, relative)

    path = Path(relative, relative_prefix=path_root)
    if path.type() != "Directory":
        parent = path.parent()
        parent_dir = parent.load()
        #parent_dir.sort_by_date()
        parent_dir.sort_by_path()
        parent_dir.scan_filetypes()
        if path.type() == "Image":
            count = 0
            position = None
            for i in parent_dir.images:
                if str(i) == str(path):
                    position = count
                    break
                count += 1

            if position is None:
                raise ValueError("Couldn't find matching image in directory: %s" % str(parent))
            else:
                if position != 0:
                    prev_pos = position-1
                else:
                    prev_pos = 0
                previous = parent_dir.images[prev_pos]

                nexts = []
                next_len = 5
                end = position + next_len
                if end >= len(parent_dir.images):
                    nexts = parent_dir.images[position+1:]
                else:
                    nexts = parent_dir.images[position+1:end]

                return template('series', path=path, parent=parent, previous=previous, nexts=nexts)
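The previous/next windowing can be checked with plain lists, independent of moments (values below are made up):

images = ['a.jpg', 'b.jpg', 'c.jpg', 'd.jpg', 'e.jpg', 'f.jpg', 'g.jpg']
position = 1                                  # index of the current image
prev_pos = position - 1 if position != 0 else 0
previous = images[prev_pos]                   # 'a.jpg'
next_len = 5
end = position + next_len
if end >= len(images):
    nexts = images[position + 1:]
else:
    nexts = images[position + 1:end]
# nexts -> ['c.jpg', 'd.jpg', 'e.jpg', 'f.jpg']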
Example 8: make_json_path
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def make_json_path(item):
    name = ''
    parent = ''
    p = Path(item)
    if p.type() == "Directory":
        #item must be a directory... just look here
        parent = p
        name = p.name
    else:
        name = p.name
        #must be some other file type... load the parent directory:
        parent = p.parent()

    #making jsons named as tags to help normalize difficult characters
    json_name = "%s.json" % to_tag(name)
    #print json_name
    return os.path.join(str(parent), json_name)
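Hypothetical calls; the exact result depends on how to_tag() normalizes the name:

make_json_path("/media/My Clip.mov")
# -> roughly "/media/my_clip.json" (sibling json named after the tagged name)
make_json_path("/media/albums")
# -> roughly "/media/albums/albums.json" (a directory gets a json inside itself)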
Example 9: find_media
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def find_media(item):
    """
    using this in content.import_content to check for time based media
    (videos or sounds)
    """
    p = Path(item)
    if p.type() == "Directory":
        root = p.load()
    else:
        parent = p.parent()
        root = parent.load()

    matches = []
    #now root is a directory...
    #use that to help find media
    root.scan_filetypes()
    matches.extend(root.sounds)
    matches.extend(root.movies)
    return matches
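A condensed sketch of the same lookup for a single non-directory item (the path is hypothetical):

from moments.path import Path

p = Path("/media/talks/notes.txt")    # hypothetical non-media file
root = p.parent().load()              # load the containing directory
root.scan_filetypes()                 # populates .sounds, .movies, etc.
media = list(root.sounds) + list(root.movies)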
Example 10: find_and_load_json
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def find_and_load_json(item, debug=False):
    """
    look in the same directory as item for a json file
    if found, load it and return the loaded object
    otherwise return None

    also [2013.07.03 10:30:19]
    deprecated:
    probably better to find_json()
    then load_json()
    """
    if re.search(r'.*\.json', item):
        if debug:
            #print "find_and_load_json: item is a json string: %s" % item
            logging.debug("find_and_load_json: item is a json string: %s" % item)
        loaded = load_json(item)
    else:
        p = Path(item)
        if p.type() == "Directory":
            parent = p
            d = p.load()
        else:
            parent = p.parent()
            d = parent.load()

        loaded = None
        for j in d.files:
            if re.search(r'.*\.json', str(j)):
                logging.debug("find_and_load_json: loading from: %s" % j)
                #print "find_and_load_json: loading from: %s" % j
                match = os.path.join(str(parent), str(j))
                loaded = load_json(match)
                #jso = file(os.path.join(str(parent), str(j)))
                #loaded = json.loads(jso.read())

    return loaded
Example 11: Timestamp
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
if translate is not None:
    (pre, post) = translate.split(':')
    matches = [ pre ]
else:
    #can manually define here
    #(pre, post) = ('/c/binaries', '')
    ## matches = [ '/first/path/to/replace/', '/second/path/to/replace/' ]
    matches = ['/c/binaries', ]
    post = ''

#for arg in sys.argv[1:]:

# make new destination based on source
now = Timestamp()
spath = Path(source_name)
parent = spath.parent()
parts = spath.filename.split('-')
new_parts = [ now.compact(accuracy="day") ]
new_parts.extend(parts[1:])
new_parts.insert(-1, "filtered")
new_name = "-".join(new_parts)
output = os.path.join(str(parent), new_name)
print(output)
#output = 'temp.m3u'

source = open(source_name)
destination = open(output, 'w')

count = 0
for line in source.readlines():
    for m in matches:
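The renaming scheme in the middle of this fragment, as a standalone sketch; the source path is hypothetical and the Timestamp import location is an assumption:

from moments.path import Path
from moments.timestamp import Timestamp   # assumed module location
import os

spath = Path("/playlists/20200101-videos.m3u")   # hypothetical source
parts = spath.filename.split('-')                # ['20200101', 'videos.m3u']
new_parts = [Timestamp().compact(accuracy="day")]
new_parts.extend(parts[1:])
new_parts.insert(-1, "filtered")
output = os.path.join(str(spath.parent()), "-".join(new_parts))
# e.g. ".../playlists/20240101-filtered-videos.m3u"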
Example 12: gaze_within
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def gaze_within(source):
    """
    at this point we know we have a local path (of some kind)...
    now we need to figure out what to do with it.

    depending on what we find at the path,
    we want to normalize the result into a list of json 'content' objects
    these should contain all of the information needed
    for displaying any of the variations of objects that may end up on a list
    name and default image are two of the most important

    this is where you will customize based on the type of list
    the inverse will be needed on the post route

    This is also the point where you will want to scan sources
    for anything new that is not in the list
    and add it to the front of the list (or however you want to handle that)

    may need to consider an import or convert process for some types
    in order to look up more information
    and make the necessary metadata (json) files
    e.g. convert a moments list to a list of content for a given time
    """
    sl = SortableList()
    contents = []
    current = None

    #TODO:
    #check for relative path here.. fill in the blanks as needed:
    if not re.match('/', source):
        raise ValueError("Don't know how to handle relative paths yet")

    path = Path(source)
    print(path.name)
    if path.type() == "Directory":
        current = scan_directory(path, sl, contents)
        #print item.directories
        #parent = item.path.parent()
        #print parent.name
    elif path.type() == "JSON":
        #could load it here... loop over each object
        pass
    elif path.type() == "List":
        #could load it here... loop over each object
        #however,
        #loading now happens as part of loading the path in scan_directory call
        #should choose one or the other
        #otherwise we end up with multiple copies of the list
        #if os.path.exists(source):
        #    sl.load(source)

        #now that the list has loaded, see if it's a list for the parent dir:
        if path.name == path.parent().name:
            current = scan_directory(path.parent(), sl, contents)
        else:
            #TODO
            #next option would be to look for a json file with the same name
            #if it exists, load that for content list
            print("%s != %s" % (path.name, path.parent().name))
            exit()
    else:
        #some other type of file...
        #load the parent directory in this case
        #and set this file to be the current content
        #this can be a time intensive way to calculate this,
        #especially if parent directory has a lot of children:
        current = scan_directory(path.parent(), sl, contents, path.filename)

    #print "Sortable List: %s" % (sl)
    #print "Contents: %s" % (contents)

    #go ahead and save the updated version
    #(e.g. any new files found during scan)
    if sl.source:
        sl.save()

    return (sl, contents, current)
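A hypothetical call, to show the shape of the return value:

sl, contents, current = gaze_within("/collections/photos/2021")   # absolute path required
# sl       -> SortableList for the source (saved back if it has a source set)
# contents -> normalized json 'content' objects for display
# current  -> whichever item scan_directory() identified as current, if any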
Example 13: sort
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def sort(relative=''):
    """
    accept a path to a moment log and enable sorting on the items
    using jquery ui for a drag and drop interface
    """
    global path_root
    if re.match('~', relative):
        relative = os.path.expanduser(relative)
    if not re.match('/', relative):
        relative = path_root + relative

    #set some defaults here...
    #if they've been changed, this will get over written on load
    groups = { "all": [], "edit": [], "slide1": [], "slide2": [], "slide3": [],
               "slide4": [], "slide5": [], "slide6": [], "slide7": [], "slide8": [],
               "slide9": [], }
    tab_order = ['all', 'edit', "slide1", "slide2", "slide3", "slide4", "slide5",
                 "slide6", "slide7", "slide8", "slide9"]

    path = Path(relative, relative_prefix=path_root)
    print(path)
    if path.exists() and path.type() == "Directory":
        response = "Error: need a file name to store the meta data in<br>"
        response += "You supplied a directory path: %s<br>" % path
        return response
    else:
        parent_directory = path.parent()
        if path.extension == ".txt":
            #create a text journal if we don't have one
            if not path.exists():
                #convert images to journal
                #print "PARENT: %s" % parent_directory
                directory = parent_directory.load()
                #print "directory: %s, of type: %s" % (directory, type(directory))
                directory.create_journal(journal=path.filename)

            #journal = path.load_journal(create=True)
            journal = path.load_journal()
            items = []
            for e in journal.entries():
                new_p = os.path.join(str(parent_directory), e.data.strip())
                #print new_p
                p = Path(new_p)
                #print p.exists()
                items.append(p)

            #initial version of groups:
            destination = Path(relative)
            destination.extension = '.json'
            groups['all'] = items

        elif path.extension == ".json":
            #we can make the initial version here...
            #skip the generation of a moments log step
            if not path.exists():
                directory = parent_directory.load()
                #print "directory: %s, of type: %s" % (directory, type(directory))
                directory.sort_by_date()
                directory.scan_filetypes()
                groups['all'] = directory.images
            else:
                loaded = load_groups(str(path))
                #template expects all items in groups to be Path objects.
                #do that now
                groups = {}
                for key, value in list(loaded.items()):
                    groups[key] = []
                    for v in value:
                        groups[key].append(Path(v))

            destination = Path(relative)

        else:
            #dunno!
            print("UNKNOWN FILE TYPE: %s" % relative)
            groups = {}
            destination = None

    #clean up tab_order as needed
    for key in list(groups.keys()):
        if not key in tab_order:
            tab_order.append(key)
    for item in tab_order[:]:
        if item not in list(groups.keys()):
            tab_order.remove(item)
    print(tab_order)

    #return template('sort', path=path, items=items)
    return template('sort', path=path, groups=groups, destination=destination, tab_order=tab_order)
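The tab_order cleanup at the end is plain Python and can be exercised on its own (the group names below are made up):

groups = {"all": [], "edit": [], "custom": []}
tab_order = ['all', 'edit', 'slide1']
for key in list(groups.keys()):
    if key not in tab_order:
        tab_order.append(key)          # every group gets a tab
for item in tab_order[:]:
    if item not in list(groups.keys()):
        tab_order.remove(item)         # tabs without a backing group are dropped
# tab_order -> ['all', 'edit', 'custom']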
Example 14: reparse
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def reparse(self):
    """
    similar to rescan
    but this time go through and regenerate the individual json files
    for each content item
    from the original HTML source file

    this will utilize the customized Scraper IPlugin module
    for the given Collection

    typically this should be performed by the Scraper itself
    during content scans

    not sure how useful this will be
    other than to make sure integration of YAPSY is working
    """
    ## print "loading logging"
    ## import logging
    ## logging.basicConfig(level=logging.DEBUG)

    logging.debug("walking directory for reparse: %s" % self.root)
    html_check = re.compile(r'.*\.html$')

    #any directories that do not contain content should be listed here
    ignores = [ "pages", "archive" ]

    self_root_path = Path(self.root)
    parent = self_root_path.parent()

    #probably safe to assume this, but...
    if os.path.isdir(self.root):
        subdirs = self_root_path.load().directories
        for subdir in subdirs:
            if not check_ignore(str(subdir), ignores):
                for root, dirs, files in os.walk(str(subdir)):
                    for f in files:
                        if html_check.search(f):
                            html_file = os.path.join(root, f)
                            print()
                            print()
                            print("Starting check of: %s" % html_file)
                            json = self.summary.scraper.parse_details(html_file)
                            self.summary.scraper.save_details(json, html_source=html_file)

                            #TODO:
                            #consider moving json saving into parse_details
                            #to avoid duplication of efforts
                            ## p_root = Path(html_file)
                            ## relative_root = p_root.to_relative(str(parent))
                            ## logging.debug("html relative path: %s" % relative_root)
                            ## #get rid of leading slash
                            ## relative_root = relative_root[1:]
                            ## json['root'] = relative_root
                            ## if json.has_key('date'):
                            ##     ts = Timestamp(json['date'])
                            ## else:
                            ##     ts = Timestamp(f.split('.')[0])
                            ## json['date'] = str(ts.compact(accuracy="day"))
                            ## json_path = os.path.join(root, ts.filename(".json"))
                            ## save_json(json_path, json)
                            ## self.append(s)

    #(or rescan)
    print("Finished parsing %s contents manually" % (len(self)))
    self.rescan()
Example 15: rescan
# Required import: from moments.path import Path [as alias]
# Or: from moments.path.Path import parent [as alias]
def rescan(self, ignores=[], debug=False):
    """
    look for all json files that describe the content items
    these should have been generated externally (e.g. during scrape)

    json files should contain the main attributes that a Content object has
    the rest will be kept in a remainder

    parsing html and generating json summaries of content
    is beyond the scope of this application
    and should be kept outside of this code base (too specific to content)
    """
    if not self.root:
        raise ValueError("Cannot rescan. No root set on collection: %s" % self.root)

    #clear out anything else
    del self[:]

    if debug:
        print("walking directory for contents: %s" % self.root)
    json_check = re.compile(r'.*\.json$')

    #it might be inefficient to try to define these here...
    #too many different names that might work in different contexts
    #ignores = ["contents", "collection", "incompletes"]
    #can pass them in if needed...

    self_root_path = Path(self.root)
    parent = self_root_path.parent()

    if not os.path.isdir(self.root):
        print("Looking for path of root: %s" % self.root)
        print("(is the drive mounted???)")
        self.root = os.path.dirname(self.root)
        #if we still don't have a directory, something is wrong with root
        assert os.path.isdir(self.root)

    #instead of looking for ignores
    #will limit by convention
    #top level directory should only contain meta jsons
    #(that should be ignored as content data)
    #content jsons will always be in a subdirectory
    #similarly, meta jsons should never be in a subdirectory
    #for root,dirs,files in os.walk(self.root):
    subdirs = self_root_path.load().directories
    for subdir in subdirs:
        if check_ignore(str(subdir), ignores):
            print("Ignoring directory: %s" % (subdir))
        else:
            for root, dirs, files in os.walk(str(subdir)):
                for f in files:
                    #if json_check.search(f):
                    if json_check.search(f) and not check_ignore(f, ignores):
                        json_file = os.path.join(root, f)
                        p_root = Path(root)
                        relative_root = p_root.to_relative(str(parent))
                        #get rid of leading slash
                        if re.match('/', relative_root):
                            relative_root = relative_root[1:]
                        if debug:
                            print("loading content from: %s" % json_file)
                        #c = Content(json_file, root=relative_root)
                        c = Content(json_file)
                        if debug:
                            print("setting base_dir to: %s" % relative_root)
                        #if updating one here, should update the other:
                        c.base_dir = relative_root
                        c.drive_dir = str(parent)
                        self.append(c)

    if debug:
        print("Finished loading %s contents manually" % (len(self)))