本文整理汇总了Python中nameTools.makeFilenameSafe函数的典型用法代码示例。如果您正苦于以下问题:Python makeFilenameSafe函数的具体用法?Python makeFilenameSafe怎么用?Python makeFilenameSafe使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了makeFilenameSafe函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: getLink
def getLink(self, linkDict):
	"""
	Download all images for the item described by `linkDict`, pack them into
	a zip archive in linkDict["dirPath"], dedup the archive, and update the
	corresponding DB row.

	Returns the path of the created archive on success, False on failure.
	"""
	try:
		linkDict = self.getDownloadInfo(linkDict)
		images = self.getImages(linkDict)
		title = linkDict['title']
		artist = linkDict['artist']
	except webFunctions.ContentError:
		self.updateDbEntry(linkDict["sourceUrl"], dlState=-2, downloadPath="ERROR", fileName="ERROR: FAILED")
		return False

	if images and title:
		fileN = title+" "+artist+".zip"
		fileN = nt.makeFilenameSafe(fileN)

		# self.log.info("geturl with processing", fileN)
		wholePath = os.path.join(linkDict["dirPath"], fileN)
		wholePath = self.insertCountIfFilenameExists(wholePath)
		self.log.info("Complete filepath: %s", wholePath)

		# Write all downloaded files to the archive.
		try:
			arch = zipfile.ZipFile(wholePath, "w")
		except OSError:
			# The filesystem rejected the name (typically non-ASCII chars).
			# Retry with an ASCII-only title. NOTE: the artist is dropped
			# from the fallback name, matching the original behaviour.
			title = title.encode('ascii','ignore').decode('ascii')
			fileN = title+".zip"
			fileN = nt.makeFilenameSafe(fileN)
			wholePath = os.path.join(linkDict["dirPath"], fileN)
			arch = zipfile.ZipFile(wholePath, "w")

		# FIX: close the archive even if a write fails, so the file handle
		# is not leaked on an exception from writestr().
		try:
			for imageName, imageContent in images:
				arch.writestr(imageName, imageContent)
		finally:
			arch.close()

		self.log.info("Successfully Saved to path: %s", wholePath)

		self.updateDbEntry(linkDict["sourceUrl"], downloadPath=linkDict["dirPath"], fileName=fileN)

		# Deduper uses the path info for relinking, so we have to dedup the item after updating the downloadPath and fileN
		dedupState = processDownload.processDownload(None, wholePath, pron=True, deleteDups=True, includePHash=True, rowId=linkDict['dbId'])
		self.log.info("Done")

		if dedupState:
			self.addTags(sourceUrl=linkDict["sourceUrl"], tags=dedupState)

		self.updateDbEntry(linkDict["sourceUrl"], dlState=2)
		return wholePath
	else:
		self.updateDbEntry(linkDict["sourceUrl"], dlState=-1, downloadPath="ERROR", fileName="ERROR: FAILED")
		return False
示例2: insertNames
def insertNames(self, buId, names):
	"""
	Replace the name-synonym rows for series `buId` with `names`.

	Old rows for the buId are deleted first so names removed at the source
	are removed here too. Names that collapse to the same filesystem-safe
	form are inserted only once.
	"""
	self.log.info("Updating name synonym table for %s with %s name(s).", buId, len(names))
	with self.transaction() as cur:
		# Delete the old names from the table, so if they're removed from
		# the source, we'll match that.
		cur.execute("DELETE FROM {tableName} WHERE buId=%s;".format(tableName=self.nameMapTableName), (buId, ))

		# FIX: use a set for the seen-name check; the original used a list,
		# making each membership test O(n) (O(n^2) over all names).
		alreadyAddedNames = set()
		for name in names:
			fsSafeName = nt.prepFilenameForMatching(name)
			if not fsSafeName:
				fsSafeName = nt.makeFilenameSafe(name)

			# We have to block duplicate names. Generally, it's pretty common
			# for multiple names to screen down to the same name after
			# passing through `prepFilenameForMatching()`.
			if fsSafeName in alreadyAddedNames:
				continue
			alreadyAddedNames.add(fsSafeName)

			cur.execute("""INSERT INTO %s (buId, name, fsSafeName) VALUES (%%s, %%s, %%s);""" % self.nameMapTableName, (buId, name, fsSafeName))

	self.log.info("Updated!")
示例3: getLink
def getLink(self, link):
	"""
	Download the file for `link`, save it into the series directory with a
	collision-safe name, dedup it, and update the DB row.
	"""
	sourceUrl      = link["sourceUrl"]
	seriesName     = link["seriesName"]
	originFileName = link["originName"]

	self.updateDbEntry(sourceUrl, dlState=1)
	self.log.info("Downloading = '%s', '%s'", seriesName, originFileName)
	dlPath, newDir = self.locateOrCreateDirectoryForSeries(seriesName)

	if link["flags"] == None:
		link["flags"] = ""

	if newDir:
		self.updateDbEntry(sourceUrl, flags=" ".join([link["flags"], "haddir"]))
		self.conn.commit()

	try:
		content, headerName = self.getLinkFile(sourceUrl)
	except Exception:
		# FIX: was a bare `except:`, which also swallowed
		# KeyboardInterrupt/SystemExit; `Exception` keeps the intended
		# catch-all behaviour for download failures.
		self.log.error("Unrecoverable error retreiving content %s", link)
		self.log.error("Traceback: %s", traceback.format_exc())
		self.updateDbEntry(sourceUrl, dlState=-1)
		return

	# Undo %xx URL-escaping in the server-provided filename.
	headerName = urllib.parse.unquote(headerName)

	fName = "%s - %s" % (originFileName, headerName)
	fName = nt.makeFilenameSafe(fName)

	fName, ext = os.path.splitext(fName)
	fName = "%s [CXC Scans]%s" % (fName, ext)

	fqFName = os.path.join(dlPath, fName)
	self.log.info("SaveName = %s", fqFName)

	# FIX: collision handling. The original re-split the already-suffixed
	# name each pass (producing "name (1) (2)...") and joined against
	# link["targetDir"] even though the first path used dlPath, so renamed
	# files would land in a different directory (or raise KeyError).
	base, ext = os.path.splitext(fName)
	loop = 1
	while os.path.exists(fqFName):
		fName = "%s (%d)%s" % (base, loop, ext)
		fqFName = os.path.join(dlPath, fName)
		loop += 1

	self.log.info("Writing file")

	filePath, fileName = os.path.split(fqFName)

	try:
		with open(fqFName, "wb") as fp:
			fp.write(content)
	except TypeError:
		self.log.error("Failure trying to retreive content from source %s", sourceUrl)
		self.updateDbEntry(sourceUrl, dlState=-4, downloadPath=filePath, fileName=fileName)
		return

	# self.log.info( filePath)
	dedupState = processDownload.processDownload(seriesName, fqFName, deleteDups=True)
	self.log.info("Done")
	self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, tags=dedupState)
	return
示例4: getDownloadInfo
def getDownloadInfo(self, linkDict, soup):
	"""
	Extract artist/title/category info from the parsed item page, ensure the
	category download directory exists, push tag info to the DB, and return
	the enriched linkDict.
	"""
	infoSection = soup.find("div", id='infobox')

	category, tags, artist = self.getCategoryTags(infoSection)
	tags = ' '.join(tags)

	linkDict['artist']  = artist
	linkDict['title']   = self.getFileName(infoSection)
	linkDict['dirPath'] = os.path.join(settings.djOnSettings["dlDir"], nt.makeFilenameSafe(category))

	# Create the per-category directory on first use.
	if os.path.exists(linkDict["dirPath"]):
		self.log.info("Folder Path already exists?: %s", linkDict["dirPath"])
	else:
		os.makedirs(linkDict["dirPath"])

	self.log.info("Folderpath: %s", linkDict["dirPath"])

	self.log.debug("Linkdict = ")
	for key, value in list(linkDict.items()):
		self.log.debug(" %s - %s", key, value)

	if tags:
		self.log.info("Adding tag info %s", tags)
		self.addTags(sourceUrl=linkDict["sourceUrl"], tags=tags)

	self.updateDbEntry(linkDict["sourceUrl"], seriesName=category, lastUpdate=time.time())

	return linkDict
示例5: getDoujinshiUploadDirectory
def getDoujinshiUploadDirectory(self, seriesName):
	"""
	Locate (or create over SFTP) the remote upload directory for
	`seriesName`, matching against the cached unsorted-directory listing.
	Returns the remote directory path.
	"""
	ulDir = self.getExistingDir(seriesName)

	if not ulDir:
		seriesName = nt.getCanonicalMangaUpdatesName(seriesName)
		safeFilename = nt.makeFilenameSafe(seriesName)
		matchName = nt.prepFilenameForMatching(seriesName)
		matchName = matchName.encode('latin-1', 'ignore').decode('latin-1')

		self.checkInitDirs()
		if matchName in self.unsortedDirs:
			ulDir = self.unsortedDirs[matchName]
		elif safeFilename in self.unsortedDirs:
			ulDir = self.unsortedDirs[safeFilename]
		else:
			self.log.info("Need to create container directory for %s", seriesName)
			ulDir = os.path.join(settings.mkSettings["uploadContainerDir"], settings.mkSettings["uploadDir"], safeFilename)
			try:
				self.sftp.mkdir(ulDir)
			except OSError:
				# FIX: this method talks SFTP (paramiko), whose mkdir raises
				# IOError/OSError on failure — never ftplib.error_perm,
				# which the original caught (copy-paste from the FTP
				# variant), so mkdir failures went unhandled.
				self.log.warn("Directory exists?")
				self.log.warn(traceback.format_exc())

	return ulDir
示例6: getUploadDirectory
def getUploadDirectory(self, seriesName):
	"""
	Locate (or create over SFTP) the remote upload directory for
	`seriesName`, matching against the cached main-directory listing.
	Returns the remote directory path.
	"""
	ulDir = self.getExistingDir(seriesName)

	if not ulDir:
		seriesName = nt.getCanonicalMangaUpdatesName(seriesName)
		safeFilename = nt.makeFilenameSafe(seriesName)
		matchName = nt.prepFilenameForMatching(seriesName)
		# NOTE(review): this utf-8 encode/decode round-trip is a no-op
		# (utf-8 can encode any str); sibling implementations use latin-1
		# here — confirm which was intended.
		matchName = matchName.encode('utf-8', 'ignore').decode('utf-8')

		self.checkInitDirs()
		if matchName in self.mainDirs:
			ulDir = self.mainDirs[matchName][0]
		elif seriesName in self.mainDirs:
			ulDir = self.mainDirs[seriesName][0]
		else:
			self.log.info("Need to create container directory for %s", seriesName)
			ulDir = os.path.join(settings.mkSettings["uploadContainerDir"], settings.mkSettings["uploadDir"], safeFilename)
			try:
				self.sftp.mkdir(ulDir)
			except OSError as e:
				# If the error is just a "directory exists" warning, ignore it silently
				# FIX: the original compared str(e) against
				# 'OSError: File already exists', but str() of an exception
				# is only its message (no class prefix), so the check could
				# never match and every already-exists error was logged as
				# a failure.
				if 'File already exists' in str(e):
					pass
				else:
					self.log.warn("Error creating directory?")
					self.log.warn(traceback.format_exc())

	return ulDir
示例7: getUploadDirectory
def getUploadDirectory(self, seriesName):
	"""
	Return the remote FTP directory for `seriesName`, creating it when no
	existing or cached directory matches.
	"""
	existing = self.getExistingDir(seriesName)
	if existing:
		return existing

	# No known directory — derive candidate names and check the cache.
	seriesName = nt.getCanonicalMangaUpdatesName(seriesName)
	safeFilename = nt.makeFilenameSafe(seriesName)
	matchName = nt.prepFilenameForMatching(seriesName)
	matchName = matchName.encode('latin-1', 'ignore').decode('latin-1')

	self.checkInitDirs()

	if matchName in self.unsortedDirs:
		return self.unsortedDirs[matchName]
	if safeFilename in self.unsortedDirs:
		return self.unsortedDirs[safeFilename]

	self.log.info("Need to create container directory for %s", seriesName)
	ulDir = os.path.join(settings.mkSettings["uploadContainerDir"], settings.mkSettings["uploadDir"], safeFilename)
	try:
		self.ftp.mkd(ulDir)
	except ftplib.error_perm as e:
		# If the error is just a "directory exists" warning, ignore it silently
		msg = str(e)
		if msg.startswith("550") and msg.endswith('File exists'):
			pass
		else:
			self.log.warn("Error creating directory?")
			self.log.warn(traceback.format_exc())
	return ulDir
示例8: getLink
def getLink(self, link):
	"""
	Retrieve the file behind `link`, save it to the target directory with a
	collision-safe name, dedup it, and update the DB row.
	"""
	sourceUrl, originFileName = link["sourceUrl"], link["originName"]

	self.log.info( "Should retreive: %s, url - %s", originFileName, sourceUrl)

	self.updateDbEntry(sourceUrl, dlState=1)
	self.conn.commit()

	fileUrl = self.getDownloadUrl(sourceUrl)
	if fileUrl is None:
		self.log.warning("Could not find url!")
		self.deleteRowsByValue(sourceUrl=sourceUrl)
		return

	try:
		content, hName = self.getLinkFile(fileUrl, sourceUrl)
	except Exception:
		# FIX: was a bare `except:`, which also swallowed
		# KeyboardInterrupt/SystemExit.
		self.log.error("Unrecoverable error retreiving content %s", link)
		self.log.error("Traceback: %s", traceback.format_exc())
		self.updateDbEntry(sourceUrl, dlState=-1)
		return

	# print("Content type = ", type(content))

	# And fix %xx crap
	hName = urllib.parse.unquote(hName)

	fName = "%s - %s" % (originFileName, hName)
	fName = nt.makeFilenameSafe(fName)

	fqFName = os.path.join(link["targetDir"], fName)
	self.log.info( "SaveName = %s", fqFName)

	loop = 1
	while os.path.exists(fqFName):
		fName = "%s - (%d) - %s" % (originFileName, loop, hName)
		# FIX: the collision name must also be sanitised — the original
		# rebuilt it from the raw header name, bypassing makeFilenameSafe.
		fName = nt.makeFilenameSafe(fName)
		fqFName = os.path.join(link["targetDir"], fName)
		loop += 1
	self.log.info( "Writing file")

	filePath, fileName = os.path.split(fqFName)

	try:
		with open(fqFName, "wb") as fp:
			fp.write(content)
	except TypeError:
		self.log.error("Failure trying to retreive content from source %s", sourceUrl)
		# FIX: record the failure instead of leaving the row stuck at
		# dlState=1 ("in progress") forever; matches the sibling handler.
		self.updateDbEntry(sourceUrl, dlState=-4, downloadPath=filePath, fileName=fileName)
		return

	#self.log.info( filePath)
	dedupState = processDownload.processDownload(link["seriesName"], fqFName, deleteDups=True, includePHash=True)
	self.log.info( "Done")
	self.updateDbEntry(sourceUrl, dlState=2, downloadPath=filePath, fileName=fileName, tags=dedupState)
	return
示例9: renameSeriesToMatchMangaUpdates
def renameSeriesToMatchMangaUpdates(scanpath):
	"""
	Interactively rename series directories under `scanpath` to the
	canonical MangaUpdates series name, prompting before each rename.
	"""
	# Lookup tables: cleaned filesystem name -> buId, and buId -> canonical name.
	idLut = nt.MtNamesMapWrapper("fsName->buId")
	muLut = nt.MtNamesMapWrapper("buId->buName")
	# NOTE(review): `db` is never used below; the constructor may open a
	# connection as a side effect — confirm before removing.
	db = DbInterface()
	print("Scanning")

	foundDirs = 0
	contents = os.listdir(scanpath)
	for dirName in contents:
		cName = nt.prepFilenameForMatching(dirName)
		mtId = idLut[cName]
		if mtId and len(mtId) > 1:
			# Ambiguous: one cleaned name maps to several MangaUpdates IDs.
			print("Multiple mtId values for '%s' ('%s')" % (cName, dirName))
			print(" ", mtId)
			print(" Skipping item")
		elif mtId:
			mtId = mtId.pop()
			mtName = muLut[mtId].pop()
			cMtName = nt.prepFilenameForMatching(mtName)
			if cMtName != cName:
				# Directory name differs from the canonical series name.
				print("Dir '%s' ('%s')" % (cName, dirName))
				print(" Should be '%s'" % (mtName, ))
				print(" URL: https://www.mangaupdates.com/series.html?id=%s" % (mtId, ))
				oldPath = os.path.join(scanpath, dirName)
				newPath = os.path.join(scanpath, nt.makeFilenameSafe(mtName))
				if not os.path.isdir(oldPath):
					raise ValueError("Not a dir. Wat?")
				print(" old '%s'" % (oldPath, ))
				print(" new '%s'" % (newPath, ))
				# Skip names whose unicode normalisation is unstable.
				newCl = nt.cleanUnicode(newPath)
				if newCl != newPath:
					print("Unicode oddness. Skipping")
					continue
				# Preserve any rating embedded in the old path.
				rating = nt.extractRatingToFloat(oldPath)
				if rating != 0:
					print(" Need to add rating = ", rating)
				mv = query_response_bool(" rename?")
				if mv:
					#
					if os.path.exists(newPath):
						# Target already exists: merge contents instead of renaming.
						print("Target dir exists! Moving files instead")
						moveFiles(oldPath, newPath)
						os.rmdir(oldPath)
						nt.dirNameProxy.changeRatingPath(newPath, rating)
					else:
						os.rename(oldPath, newPath)
						nt.dirNameProxy.changeRatingPath(newPath, rating)
				# Count every mismatched directory found.
				foundDirs += 1
	print("Total directories that need renaming", foundDirs)
示例10: doDownload
def doDownload(self, linkDict, retag=False):
	"""
	Fetch the file behind linkDict["dlPage"], save it into
	linkDict["dirPath"] (shortening the filename if the filesystem rejects
	it), dedup it, and update the DB row. Returns False on failure.
	"""
	downloadUrl = self.getDownloadUrl(linkDict["dlPage"], linkDict["sourceUrl"])
	if downloadUrl:
		fCont, fName = self.wg.getFileAndName(downloadUrl)
		# self.log.info(len(content))
		if linkDict["originName"] in fName:
			fileN = fName
		else:
			fileN = "%s - %s.zip" % (linkDict["originName"], fName)
			fileN = fileN.replace(".zip .zip", ".zip")
		fileN = nt.makeFilenameSafe(fileN)

		# Retry loop: an IOError on open is treated as "filename too long";
		# each pass chops one more character off the stem while keeping the
		# last four characters (the ".zip" extension).
		chop = len(fileN) - 4
		wholePath = "ERROR"
		while 1:
			try:
				fileN = fileN[:chop] + fileN[-4:]
				# self.log.info("geturl with processing", fileN)
				wholePath = os.path.join(linkDict["dirPath"], fileN)
				self.log.info("Complete filepath: %s", wholePath)

				# Write all downloaded files to the archive.
				with open(wholePath, "wb") as fp:
					fp.write(fCont)
				self.log.info("Successfully Saved to path: %s", wholePath)
				break
			except IOError:
				chop = chop - 1
				self.log.warn("Truncating file length to %s characters.", chop)

		if not linkDict["tags"]:
			linkDict["tags"] = ""

		self.updateDbEntry(linkDict["sourceUrl"], downloadPath=linkDict["dirPath"], fileName=fileN)

		# Deduper uses the path info for relinking, so we have to dedup the item after updating the downloadPath and fileN
		dedupState = processDownload.processDownload(linkDict["seriesName"], wholePath, pron=True)
		self.log.info("Done")
		if dedupState:
			self.addTags(sourceUrl=linkDict["sourceUrl"], tags=dedupState)

		self.updateDbEntry(linkDict["sourceUrl"], dlState=2)
		self.conn.commit()
		# NOTE(review): unlike the sibling doDownload implementations, the
		# success path returns None rather than wholePath — confirm callers
		# do not rely on the return value before changing it.
	else:
		self.updateDbEntry(linkDict["sourceUrl"], dlState=-1, downloadPath="ERROR", fileName="ERROR: FAILED")
		self.conn.commit()
		return False
示例11: doDownload
def doDownload(self, linkDict, retag=False):
	"""
	Fetch all images for `linkDict`, pack them into a zip archive in
	linkDict["dirPath"], dedup, and update the DB row.

	Returns the archive path on success, False when no images were fetched.
	"""
	images = self.fetchImages(linkDict)

	# self.log.info(len(content))
	if images:
		fileN = linkDict['originName']+".zip"
		fileN = nt.makeFilenameSafe(fileN)

		# self.log.info("geturl with processing", fileN)
		wholePath = os.path.join(linkDict["dirPath"], fileN)
		self.log.info("Complete filepath: %s", wholePath)

		# Write all downloaded files to the archive.
		# FIX: context manager guarantees the zip handle is closed even if
		# writestr() raises (the original leaked it on error).
		with zipfile.ZipFile(wholePath, "w") as arch:
			for imageName, imageContent in images:
				arch.writestr(imageName, imageContent)
		self.log.info("Successfully Saved to path: %s", wholePath)

		if not linkDict["tags"]:
			linkDict["tags"] = ""

		self.updateDbEntry(linkDict["sourceUrl"], downloadPath=linkDict["dirPath"], fileName=fileN)

		# Deduper uses the path info for relinking, so we have to dedup the item after updating the downloadPath and fileN
		dedupState = processDownload.processDownload(None, wholePath, pron=True, deleteDups=True, includePHash=True)
		self.log.info( "Done")

		if dedupState:
			self.addTags(sourceUrl=linkDict["sourceUrl"], tags=dedupState)

		self.updateDbEntry(linkDict["sourceUrl"], dlState=2)
		self.conn.commit()
		return wholePath
	else:
		self.updateDbEntry(linkDict["sourceUrl"], dlState=-1, downloadPath="ERROR", fileName="ERROR: FAILED")
		self.conn.commit()
		return False
示例12: getDirAndFName
def getDirAndFName(self, soup):
	"""
	Derive the download directory and filename from the page's title
	breadcrumb (segments separated by "»").

	Returns (dirPath, fName, rawTitleText).
	Raises ValueError when no title div is present.
	"""
	title = soup.find("div", class_="title")
	if not title:
		raise ValueError("Could not find title. Wat?")

	titleSplit = title.get_text().split("»")
	# .strip() is equivalent to the original .rstrip().lstrip() in one call.
	safePath = [nt.makeFilenameSafe(item.strip()) for item in titleSplit]

	fqPath = os.path.join(settings.djSettings["dlDir"], *safePath)

	# FIX: use os.path.split rather than rsplit("/", 1) so the code is
	# correct on platforms whose path separator is not "/".
	dirPath, fName = os.path.split(fqPath)
	self.log.debug("dirPath = %s", dirPath)
	self.log.debug("fName = %s", fName)

	return dirPath, fName, title.get_text()
示例13: getFile
def getFile(self, file_data):
	"""
	Download all page images for the chapter described by `file_data`, pack
	them into a zip archive in the series directory, dedup, and record the
	result in the DB. Rows already downloaded are skipped.
	"""
	row = self.getRowsByValue(sourceUrl=file_data["baseUrl"], limitByKey=False)
	if row and row[0]['dlState'] != 0:
		# Already downloaded (or in progress) — nothing to do.
		return
	if not row:
		self.insertIntoDb(retreivalTime = time.time(),
					sourceUrl  = file_data["baseUrl"],
					originName = file_data["title"],
					dlState    = 1,
					seriesName = file_data["title"])

	image_links = self.getFileInfo(file_data)

	images = []
	for imagen, imageurl in image_links:
		# Images are obfuscated; get_image de-obfuscates with the xor_key.
		imdat = self.get_image(imageurl, file_data['xor_key'])
		images.append((imagen, imdat))
		# filen = nt.makeFilenameSafe(file_data['title'] + " - " + imagen)
		# with open(filen, "wb") as fp:
		# 	fp.write(imdat)

	fileN = '{series} - c{chapNo:03.0f} [MangaBox].zip'.format(series=file_data['title'], chapNo=file_data['chapter'])
	fileN = nt.makeFilenameSafe(fileN)

	dlPath, newDir = self.locateOrCreateDirectoryForSeries(file_data["title"])
	wholePath = os.path.join(dlPath, fileN)

	if newDir:
		self.updateDbEntry(file_data["baseUrl"], flags="haddir")
		self.conn.commit()

	# FIX: context manager guarantees the zip handle is closed even if
	# writestr() raises (the original leaked it on error).
	with zipfile.ZipFile(wholePath, "w") as arch:
		for imageName, imageContent in images:
			arch.writestr(imageName, imageContent)

	self.log.info("Successfully Saved to path: %s", wholePath)

	dedupState = processDownload.processDownload(file_data["title"], wholePath, deleteDups=True)
	if dedupState:
		self.addTags(sourceUrl=file_data["baseUrl"], tags=dedupState)

	self.updateDbEntry(file_data["baseUrl"], dlState=2, downloadPath=dlPath, fileName=fileN, originName=fileN)
	self.conn.commit()
	self.log.info( "Done")
示例14: getDownloadInfo
def getDownloadInfo(self, linkDict, retag=False):
	"""
	Scrape the gallery page for linkDict["sourceUrl"]: extract category,
	tags, note, and the next-page download link; ensure the per-category
	download directory exists; push tag/note info to the DB; and return the
	enriched linkDict. Raises IOError when the page cannot be parsed.
	"""
	sourcePage = linkDict["sourceUrl"]

	self.log.info("Retreiving item: %s", sourcePage)

	if not retag:
		self.updateDbEntry(linkDict["sourceUrl"], dlState=1)

	cont = self.wg.getpage(sourcePage, addlHeaders={'Referer': 'http://pururin.com/'})
	soup = bs4.BeautifulSoup(cont, "lxml")
	if not soup:
		self.log.critical("No download at url %s! SourceUrl = %s", sourcePage, linkDict["sourceUrl"])
		raise IOError("Invalid webpage")

	category, tags = self.getCategoryTags(soup)
	note = self.getNote(soup)
	tagStr = ' '.join(tags)

	linkDict['dirPath'] = os.path.join(settings.puSettings["dlDir"], nt.makeFilenameSafe(category))

	# Create the per-category directory on first use.
	if os.path.exists(linkDict["dirPath"]):
		self.log.info("Folder Path already exists?: %s", linkDict["dirPath"])
	else:
		os.makedirs(linkDict["dirPath"])

	self.log.info("Folderpath: %s", linkDict["dirPath"])
	#self.log.info(os.path.join())

	dlPage = soup.find("a", class_="link-next")
	linkDict["dlLink"] = urllib.parse.urljoin(self.urlBase, dlPage["href"])

	self.log.debug("Linkdict = ")
	for key, value in list(linkDict.items()):
		self.log.debug(" %s - %s", key, value)

	if tagStr:
		self.log.info("Adding tag info %s", tagStr)
		self.addTags(sourceUrl=linkDict["sourceUrl"], tags=tagStr)

	if note:
		self.log.info("Adding note %s", note)
		self.updateDbEntry(linkDict["sourceUrl"], note=note)

	self.updateDbEntry(linkDict["sourceUrl"], seriesName=category, lastUpdate=time.time())

	return linkDict
示例15: doDownload
def doDownload(self, linkDict, retag=False):
	"""
	Zip the fetched chapter images into linkDict["dirPath"], dedup, and
	record the download in the DB.

	Returns the archive path on success, False when no images were fetched.
	"""
	images = self.fetchImages(linkDict)

	# images = ['wat']
	# print(linkDict)
	# self.log.info(len(content))
	if images:
		linkDict["chapterNo"] = float(linkDict["chapterNo"])
		fileN = '{series} - c{chapNo:06.1f} - {sourceName} [crunchyroll].zip'.format(series=linkDict['seriesName'], chapNo=linkDict["chapterNo"], sourceName=linkDict['originName'])
		fileN = nt.makeFilenameSafe(fileN)

		# self.log.info("geturl with processing", fileN)
		wholePath = os.path.join(linkDict["dirPath"], fileN)
		self.log.info("Complete filepath: %s", wholePath)

		# Write all downloaded files to the archive.
		# FIX: context manager guarantees the zip handle is closed even if
		# writestr() raises (the original leaked it on error).
		with zipfile.ZipFile(wholePath, "w") as arch:
			for imageName, imageContent in images:
				arch.writestr(imageName, imageContent)
		self.log.info("Successfully Saved to path: %s", wholePath)

		if not linkDict["tags"]:
			linkDict["tags"] = ""

		dedupState = processDownload.processDownload(linkDict["seriesName"], wholePath, deleteDups=True)
		self.log.info( "Done")

		if dedupState:
			self.addTags(sourceUrl=linkDict["sourceUrl"], tags=dedupState)

		self.updateDbEntry(linkDict["sourceUrl"], dlState=2, downloadPath=linkDict["dirPath"], fileName=fileN, originName=fileN)
		self.conn.commit()
		return wholePath
	else:
		self.updateDbEntry(linkDict["sourceUrl"], dlState=-1, downloadPath="ERROR", fileName="ERROR: FAILED")
		self.conn.commit()
		return False