This page collects typical usage examples of the Python function mylar.helpers.now. If you are wondering what the now function does and how to use it in practice, the curated examples here may help.
Below are 15 code examples of the now function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
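All of the examples below treat helpers.now() as returning a formatted timestamp string rather than a datetime object: Example 3 parses mylar.RSS_LASTRUN (which is assigned from helpers.now()) with the format "%Y-%m-%d %H:%M:%S", and Examples 8 and 11 slice helpers.now()[:4] to get the current year. A minimal sketch of what such a helper presumably looks like, inferred from those call sites rather than taken from Mylar's actual source:

import datetime

def now():
    # Return the current local time as a string, e.g. '2013-05-07 21:34:02'.
    # The format is inferred from strptime(mylar.RSS_LASTRUN, "%Y-%m-%d %H:%M:%S")
    # in Example 3; the real mylar.helpers.now may differ in detail.
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")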
Example 1: log
def log(self, message, level):
    logger = logging.getLogger('mylar')
    threadname = threading.currentThread().getName()

    if level != 'DEBUG':
        if mylar.OS_DETECT == "Windows" and mylar.OS_ENCODING != "utf-8":
            tmpthedate = unicodedata.normalize('NFKD', helpers.now().decode(mylar.OS_ENCODING, "replace"))
        else:
            tmpthedate = helpers.now()
        mylar.LOG_LIST.insert(0, (tmpthedate, message, level, threadname))

    message = threadname + ' : ' + message

    if level == 'DEBUG':
        logger.debug(message)
    elif level == 'INFO':
        logger.info(message)
    elif level == 'WARNING':
        logger.warn(message)
    elif level == 'FDEBUG':
        logger.debug(message)
    else:
        logger.error(message)
Example 2: foundsearch
def foundsearch(ComicID, IssueID, down=None):
    # When doing a Force Search (Wanted tab), the resulting search calls this to update.
    # This is all redundant code that forceRescan already does.
    # It should be redone at some point so that instead of rescanning the entire
    # series directory, it just scans for the issue that was downloaded and
    # changes the status to Snatched accordingly. The have count is not incremented
    # at this stage as nothing is downloaded yet - only the .nzb has been snatched and sent to SAB.
    myDB = db.DBConnection()
    comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
    issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
    CYear = issue['IssueDate'][:4]

    if down is None:
        # update the status to Snatched (so it won't keep on re-downloading!)
        logger.fdebug("updating status to snatched")
        controlValue = {"IssueID": IssueID}
        newValue = {"Status": "Snatched"}
        myDB.upsert("issues", newValue, controlValue)

        # update the snatched DB
        snatchedupdate = {"IssueID": IssueID,
                          "Status": "Snatched"}
        newsnatchValues = {"ComicName": comic['ComicName'],
                           "ComicID": ComicID,
                           "Issue_Number": issue['Issue_Number'],
                           "DateAdded": helpers.now(),
                           "Status": "Snatched"}
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)
    else:
        snatchedupdate = {"IssueID": IssueID,
                          "Status": "Downloaded"}
        newsnatchValues = {"ComicName": comic['ComicName'],
                           "ComicID": ComicID,
                           "Issue_Number": issue['Issue_Number'],
                           "DateAdded": helpers.now(),
                           "Status": "Downloaded"}
        myDB.upsert("snatched", newsnatchValues, snatchedupdate)

    #print ("finished updating snatched db.")
    logger.info(u"Updating now complete for " + comic['ComicName'] + " issue: " + str(issue['Issue_Number']))
    return
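Nearly every example writes to the database through myDB.upsert(table, newValueDict, controlValueDict), where the control dict identifies the row and the value dict carries the columns to set. A rough sketch of that pattern over plain sqlite3, shown as an assumption about the helper's behavior rather than Mylar's actual db.DBConnection.upsert:

import sqlite3

def upsert(connection, table_name, value_dict, key_dict):
    # Try an UPDATE keyed on key_dict; if no row matched, INSERT the
    # union of both dicts as a new row.
    changes = ', '.join('%s = ?' % k for k in value_dict)
    conditions = ' AND '.join('%s = ?' % k for k in key_dict)
    values = list(value_dict.values()) + list(key_dict.values())

    cur = connection.execute('UPDATE %s SET %s WHERE %s' % (table_name, changes, conditions), values)
    if cur.rowcount == 0:
        merged = dict(key_dict, **value_dict)
        columns = ', '.join(merged)
        placeholders = ', '.join('?' * len(merged))
        connection.execute('INSERT INTO %s (%s) VALUES (%s)' % (table_name, columns, placeholders),
                           list(merged.values()))
    connection.commit()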
Example 3: tehMain
def tehMain():
    logger.info('RSS Feed Check was last run at : ' + str(mylar.RSS_LASTRUN))
    firstrun = "no"
    # check the last RSS run to make sure it's not hammering the providers.
    if mylar.RSS_LASTRUN is None or mylar.RSS_LASTRUN == '' or mylar.RSS_LASTRUN == '0':
        logger.info('RSS Feed Check First Ever Run.')
        firstrun = "yes"
        mins = 0
    else:
        c_obj_date = datetime.datetime.strptime(mylar.RSS_LASTRUN, "%Y-%m-%d %H:%M:%S")
        n_date = datetime.datetime.now()
        absdiff = abs(n_date - c_obj_date)
        mins = (absdiff.days * 24 * 60 * 60 + absdiff.seconds) / 60.0  # total seconds elapsed / 60 for minutes (3600 would give hours)

    if firstrun == "no" and mins < int(mylar.RSS_CHECKINTERVAL):
        logger.fdebug('RSS Check has taken place less than the threshold - not initiating at this time.')
        return

    mylar.RSS_LASTRUN = helpers.now()
    logger.fdebug('Updating RSS Run time to : ' + str(mylar.RSS_LASTRUN))
    mylar.config_write()

    # loop through the nzb/torrent feeds
    if mylar.ENABLE_TORRENTS:
        logger.fdebug("[RSS] Initiating Torrent RSS Check.")
        if mylar.ENABLE_KAT:
            logger.fdebug('[RSS] Initiating Torrent RSS Feed Check on KAT.')
            torrents(pickfeed='3')
        if mylar.ENABLE_CBT:
            logger.fdebug('[RSS] Initiating Torrent RSS Feed Check on CBT.')
            torrents(pickfeed='1')
            torrents(pickfeed='4')
    logger.fdebug('[RSS] Initiating RSS Feed Check for NZB Providers.')
    nzbs()
    logger.fdebug('[RSS] RSS Feed Check/Update Complete')
    logger.fdebug('[RSS] Watchlist Check for new Releases')
    #if mylar.ENABLE_TORRENTS:
    #    if mylar.ENABLE_KAT:
    #        search.searchforissue(rsscheck='yes')
    #    if mylar.ENABLE_CBT:
    mylar.search.searchforissue(rsscheck='yes')
    #nzbcheck here
    #nzbs(rsscheck='yes')
    logger.fdebug('[RSS] Watchlist Check complete.')
    return
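The elapsed-minutes arithmetic above predates timedelta.total_seconds(); on Python 2.7+ the same value can be computed more directly. A small equivalent sketch (function name is illustrative, not from Mylar):

import datetime

def minutes_since(last_run, fmt="%Y-%m-%d %H:%M:%S"):
    # Same computation as tehMain(): parse the stored timestamp string
    # and return the elapsed time in minutes as a float.
    then = datetime.datetime.strptime(last_run, fmt)
    diff = abs(datetime.datetime.now() - then)
    return diff.total_seconds() / 60.0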
Example 4: markFailed
def markFailed(self):
    # use this to forcibly mark a single issue as Failed (ie. if a search result is sent to a client, but the result
    # ends up returning a 404 or something else that prevents the download from being initiated).
    module = '[FAILED-DOWNLOAD]'

    myDB = db.DBConnection()

    logger.info(module + ' Marking as a Failed Download.')
    logger.fdebug(module + ' nzb_name: ' + self.nzb_name)
    logger.fdebug(module + ' issueid: ' + str(self.issueid))
    logger.fdebug(module + ' nzb_id: ' + str(self.id))
    logger.fdebug(module + ' prov: ' + self.prov)
    logger.fdebug(module + ' oneoffinfo: ' + str(self.oneoffinfo))

    if self.oneoffinfo:
        ComicName = self.oneoffinfo['ComicName']
        IssueNumber = self.oneoffinfo['IssueNumber']
    else:
        if 'annual' in self.nzb_name.lower():
            logger.info(module + ' Annual detected.')
            annchk = "yes"
            issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()
        else:
            issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [self.issueid]).fetchone()

        ctrlVal = {"IssueID": self.issueid}
        Vals = {"Status": 'Failed'}
        myDB.upsert("issues", Vals, ctrlVal)

        ComicName = issuenzb['ComicName']
        IssueNumber = issuenzb['Issue_Number']

    ctrlVal = {"ID": self.id,
               "Provider": self.prov,
               "NZBName": self.nzb_name}
    Vals = {"Status": 'Failed',
            "ComicName": ComicName,
            "Issue_Number": IssueNumber,
            "IssueID": self.issueid,
            "ComicID": self.comicid,
            "DateFailed": helpers.now()}
    myDB.upsert("failed", Vals, ctrlVal)

    logger.info(module + ' Successfully marked as Failed.')
Example 5: foundsearch
def foundsearch(ComicID, IssueID):
    myDB = db.DBConnection()
    #print ("Updater-ComicID: " + str(ComicID))
    #print ("Updater-IssueID: " + str(IssueID))
    comic = myDB.action('SELECT * FROM comics WHERE ComicID=?', [ComicID]).fetchone()
    issue = myDB.action('SELECT * FROM issues WHERE IssueID=?', [IssueID]).fetchone()
    #print ("comic location: " + comic['ComicLocation'])
    # this is too soon - the file hasn't even downloaded yet.
    # fixed and addressed in search.py and followed through here!
    # check sab history for completion here :)
    CYear = issue['IssueDate'][:4]
    print ("year: " + str(CYear))
    #slog = myDB.action('SELECT * FROM sablog WHERE ComicName=? AND ComicYEAR=?', [issue['ComicName'], str(CYear)]).fetchone()
    # this checks the active queue for downloading/non-existent jobs
    # --end queue check
    # this checks history for completed jobs...
    # ---
    # -- end history check
    fc = filechecker.listFiles(comic['ComicLocation'], comic['ComicName'])
    HaveDict = {"ComicID": ComicID}
    newHave = {"Have": fc['comiccount']}
    myDB.upsert("comics", newHave, HaveDict)
    # ---
    issue = myDB.action('SELECT * FROM issues WHERE IssueID=? AND ComicID=?', [IssueID, ComicID]).fetchone()
    #print ("updating status to snatched")
    controlValueDict = {"IssueID": IssueID}
    newValueDict = {"Status": "Snatched"}
    #print ("updating snatched db.")
    myDB.upsert("issues", newValueDict, controlValueDict)
    snatchedupdate = {"IssueID": IssueID}
    newsnatchValues = {"ComicName": comic['ComicName'],
                       "ComicID": ComicID,
                       "Issue_Number": issue['Issue_Number'],
                       "DateAdded": helpers.now(),
                       "Status": "Snatched"}
    myDB.upsert("snatched", newsnatchValues, snatchedupdate)
    # we need to update sablog now to mark the nzo_id row as completed so it's not used again.
    # this becomes an issue with files downloaded twice or with the same name...
    #print ("finished updating snatched db.")
    logger.info(u"Updating now complete for " + str(comic['ComicName']) + " issue: " + str(issue['Issue_Number']))
    return
Example 6: foundsearch
def foundsearch(ComicID, IssueID):
    # When doing a Force Search (Wanted tab), the resulting search calls this to update.
    # This is all redundant code that forceRescan already does.
    # It should be redone at some point so that instead of rescanning the entire
    # series directory, it just scans for the issue that was downloaded and
    # changes the status to Snatched accordingly. The have count is not incremented
    # at this stage as nothing is downloaded yet - only the .nzb has been snatched and sent to SAB.
    myDB = db.DBConnection()
    comic = myDB.action("SELECT * FROM comics WHERE ComicID=?", [ComicID]).fetchone()
    issue = myDB.action("SELECT * FROM issues WHERE IssueID=?", [IssueID]).fetchone()
    CYear = issue["IssueDate"][:4]

    # fc = filechecker.listFiles(comic['ComicLocation'], comic['ComicName'])
    # HaveDict = {"ComicID": ComicID}
    # newHave = { "Have": fc['comiccount'] }
    # myDB.upsert("comics", newHave, HaveDict)
    # #---
    issue = myDB.action("SELECT * FROM issues WHERE IssueID=? AND ComicID=?", [IssueID, ComicID]).fetchone()

    # update the status to Snatched (so it won't keep on re-downloading!)
    logger.fdebug("updating status to snatched")
    controlValue = {"IssueID": IssueID}
    newValue = {"Status": "Snatched"}
    myDB.upsert("issues", newValue, controlValue)

    # update the snatched DB
    controlValueDict = {"IssueID": IssueID}
    newValueDict = {"Status": "Snatched"}
    logger.fdebug("updating snatched db.")
    myDB.upsert("issues", newValueDict, controlValueDict)

    snatchedupdate = {"IssueID": IssueID}
    newsnatchValues = {
        "ComicName": comic["ComicName"],
        "ComicID": ComicID,
        "Issue_Number": issue["Issue_Number"],
        "DateAdded": helpers.now(),
        "Status": "Snatched",
    }
    myDB.upsert("snatched", newsnatchValues, snatchedupdate)
    # print ("finished updating snatched db.")
    logger.info(u"Updating now complete for " + str(comic["ComicName"]) + " issue: " + str(issue["Issue_Number"]))
    return
Example 7: log
def log(self, message, level):
    logger = logging.getLogger('mylar')
    threadname = threading.currentThread().getName()

    if level != 'DEBUG':
        mylar.LOG_LIST.insert(0, (helpers.now(), message, level, threadname))

    message = threadname + ' : ' + message

    if level == 'DEBUG':
        logger.debug(message)
    elif level == 'INFO':
        logger.info(message)
    elif level == 'WARNING':
        logger.warn(message)
    else:
        logger.error(message)
Example 8: pullitcheck
def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None):
    logger.info(u"Checking the Weekly Releases list for comics I'm watching...")
    myDB = db.DBConnection()

    not_t = ['TP',
             'NA',
             'HC',
             'PI']
    not_c = ['PTG',
             'COMBO PACK',
             '(PP #']

    lines = []
    unlines = []
    llen = []
    ccname = []
    pubdate = []
    w = 0
    tot = 0
    chkout = []
    watchfnd = []
    watchfndiss = []
    watchfndextra = []
    #print ("----------WATCHLIST--------")
    a_list = []
    b_list = []
    comicid = []

    mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")
    con = sqlite3.connect(str(mylardb))

    with con:
        cur = con.cursor()
        # if it's a one-off check (during an add series), load the comicname here and ignore below.
        if comic1off_name:
            logger.fdebug("this is a one-off: " + str(comic1off_name))
            lines.append(comic1off_name.strip())
            unlines.append(comic1off_name.strip())
            comicid.append(comic1off_id)
            w = 1
        else:
            # let's read in the comic.watchlist from the db here
            cur.execute("SELECT ComicID, ComicName, ComicYear, ComicPublisher, ComicPublished, LatestDate from comics")
            while True:
                watchd = cur.fetchone()
                #print ("watchd: " + str(watchd))
                if watchd is None:
                    break
                if 'Present' in watchd[4] or (helpers.now()[:4] in watchd[4]):
                    # this gets buggered up when series are named the same, and one ends in the current
                    # year, and the new series starts in the same year - ie. Avengers
                    # let's grab the latest issue date and see how far it is from current
                    # anything > 45 days we'll assume it's a false match ;)
                    #logger.fdebug("ComicName: " + watchd[1])
                    latestdate = watchd[5]
                    #logger.fdebug("latestdate: " + str(latestdate))
                    c_date = datetime.date(int(latestdate[:4]), int(latestdate[5:7]), 1)
                    n_date = datetime.date.today()
                    #logger.fdebug("c_date : " + str(c_date) + " ... n_date : " + str(n_date))
                    recentchk = (n_date - c_date).days
                    #logger.fdebug("recentchk: " + str(recentchk) + " days")
                    #logger.fdebug(" ----- ")
                    if recentchk < 55:
                        # recent enough to be considered currently publishing - keep it on the list.
                        a_list.append(watchd[1])
                        b_list.append(watchd[2])
                        comicid.append(watchd[0])
                        pubdate.append(watchd[4])
                        #print ( "Comic:" + str(a_list[w]) + " Year: " + str(b_list[w]) )
                        #if "WOLVERINE AND THE X-MEN" in str(a_list[w]): a_list[w] = "WOLVERINE AND X-MEN"
                        lines.append(a_list[w].strip())
                        unlines.append(a_list[w].strip())
                        llen.append(a_list[w].splitlines())
                        ccname.append(a_list[w].strip())
                        tmpwords = a_list[w].split(None)
                        ltmpwords = len(tmpwords)
                        ltmp = 1
                        w += 1

        cnt = int(w - 1)
        cntback = int(w - 1)
        kp = []
        ki = []
        kc = []
        otot = 0

        logger.fdebug("You are watching for: " + str(w) + " comics")
        #print ("----------THIS WEEK'S PUBLISHED COMICS------------")
        if w > 0:
            while (cnt > -1):
                lines[cnt] = lines[cnt].upper()
                #llen[cnt] = str(llen[cnt])
                #logger.fdebug("looking for : " + str(lines[cnt]))
                sqlsearch = re.sub('[\_\#\,\/\:\;\.\-\!\$\%\&\'\?\@]', ' ', lines[cnt])
                sqlsearch = re.sub(r'\s', '%', sqlsearch)
                if 'THE' in sqlsearch: sqlsearch = re.sub('THE', '', sqlsearch)
                if '+' in sqlsearch: sqlsearch = re.sub('\+', '%PLUS%', sqlsearch)
                #......... (part of the code omitted here) .........
Example 9: Process
#......... (part of the code omitted here) .........
        issueid = nzbiss['IssueID']
        logger.fdebug(module + ' Issueid: ' + str(issueid))
        sarc = nzbiss['SARC']
        # use issueid to get publisher, series, year, issue number
    else:
        issueid = self.issueid
        nzbiss = myDB.selectone("SELECT * from nzblog WHERE IssueID=?", [issueid]).fetchone()
        if nzbiss is None:
            logger.info(module + ' Cannot locate corresponding record in download history. This will be implemented soon.')
            self.valreturn.append({"self.log": self.log,
                                   "mode": 'stop'})
            return self.queue.put(self.valreturn)

        nzbname = nzbiss['NZBName']

    # find the provider.
    self.prov = nzbiss['PROVIDER']
    logger.info(module + ' Provider: ' + self.prov)

    # grab the id.
    self.id = nzbiss['ID']
    logger.info(module + ' ID: ' + self.id)
    annchk = "no"

    if 'annual' in nzbname.lower():
        logger.info(module + ' Annual detected.')
        annchk = "yes"
        issuenzb = myDB.selectone("SELECT * from annuals WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()
    else:
        issuenzb = myDB.selectone("SELECT * from issues WHERE IssueID=? AND ComicName NOT NULL", [issueid]).fetchone()

    if issuenzb is not None:
        logger.info(module + ' issuenzb found.')
        if helpers.is_number(issueid):
            sandwich = int(issuenzb['IssueID'])
    else:
        logger.info(module + ' issuenzb not found.')
        # if it's non-numeric, it contains a 'G' at the beginning, indicating a multi-volume
        # match using GCD data. Set sandwich to 1 so it will bypass and continue post-processing.
        if 'S' in issueid:
            sandwich = issueid
        elif 'G' in issueid or '-' in issueid:
            sandwich = 1

    if helpers.is_number(sandwich):
        if sandwich < 900000:
            # if sandwich is less than 900000 it's a normal watchlist download. Bypass.
            pass
    else:
        logger.info('Failed download handling for story-arcs and one-offs is not supported yet. Be patient!')
        self._log(' Unable to locate downloaded file to rename. PostProcessing aborted.')
        self.valreturn.append({"self.log": self.log,
                               "mode": 'stop'})
        return self.queue.put(self.valreturn)

    comicid = issuenzb['ComicID']
    issuenumOG = issuenzb['Issue_Number']

    logger.info(module + ' Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' that was downloaded using ' + self.prov)
    self._log('Successfully detected as : ' + issuenzb['ComicName'] + ' issue: ' + str(issuenzb['Issue_Number']) + ' downloaded using ' + self.prov)

    logger.info(module + ' Marking as a Failed Download.')
    self._log('Marking as a Failed Download.')

    ctrlVal = {"IssueID": issueid}
    Vals = {"Status": 'Failed'}
    myDB.upsert("issues", Vals, ctrlVal)

    ctrlVal = {"ID": self.id,
               "Provider": self.prov,
               "NZBName": nzbname}
    Vals = {"Status": 'Failed',
            "ComicName": issuenzb['ComicName'],
            "Issue_Number": issuenzb['Issue_Number'],
            "IssueID": issueid,
            "ComicID": comicid,
            "DateFailed": helpers.now()}
    myDB.upsert("failed", Vals, ctrlVal)

    logger.info(module + ' Successfully marked as Failed.')
    self._log('Successfully marked as Failed.')

    if mylar.CONFIG.FAILED_AUTO:
        logger.info(module + ' Sending back to search to see if we can find something that will not fail.')
        self._log('Sending back to search to see if we can find something better that will not fail.')
        self.valreturn.append({"self.log": self.log,
                               "mode": 'retry',
                               "issueid": issueid,
                               "comicid": comicid,
                               "comicname": issuenzb['ComicName'],
                               "issuenumber": issuenzb['Issue_Number'],
                               "annchk": annchk})
        return self.queue.put(self.valreturn)
    else:
        logger.info(module + ' Stopping search here as automatic handling of failed downloads is not enabled *hint*')
        self._log('Stopping search here as automatic handling of failed downloads is not enabled *hint*')
        self.valreturn.append({"self.log": self.log,
                               "mode": 'stop'})
        return self.queue.put(self.valreturn)
Example 10: emit
def emit(self, record):
    message = self.format(record)
    message = message.replace("\n", "<br />")
    mylar.LOG_LIST.insert(0, (helpers.now(), message, record.levelname, record.threadName))
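This emit() override is the logging-handler counterpart of what Examples 1 and 7 do inline: anything routed through the standard logging module also lands in mylar.LOG_LIST for display. A sketch of how such a handler might be wired up - the ListHandler class name is hypothetical, only the emit() body comes from the example above:

import logging
import mylar
from mylar import helpers

class ListHandler(logging.Handler):
    # Hypothetical container class for the emit() shown in Example 10.
    def emit(self, record):
        message = self.format(record)
        message = message.replace("\n", "<br />")
        mylar.LOG_LIST.insert(0, (helpers.now(), message, record.levelname, record.threadName))

logging.getLogger('mylar').addHandler(ListHandler())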
Example 11: pullitcheck
def pullitcheck(comic1off_name=None, comic1off_id=None, forcecheck=None):
    logger.info(u"Checking the Weekly Releases list for comics I'm watching...")
    myDB = db.DBConnection()

    not_t = ["TP", "NA", "HC", "PI"]
    not_c = ["PTG", "COMBO PACK", "(PP #"]

    lines = []
    unlines = []
    llen = []
    ccname = []
    pubdate = []
    w = 0
    tot = 0
    chkout = []
    watchfnd = []
    watchfndiss = []
    watchfndextra = []
    # print ("----------WATCHLIST--------")
    a_list = []
    b_list = []
    comicid = []

    mylardb = os.path.join(mylar.DATA_DIR, "mylar.db")
    con = sqlite3.connect(str(mylardb))

    with con:
        cur = con.cursor()
        # if it's a one-off check (during an add series), load the comicname here and ignore below.
        if comic1off_name:
            lines.append(comic1off_name.strip())
            unlines.append(comic1off_name.strip())
            comicid.append(comic1off_id)
            w = 1
        else:
            # let's read in the comic.watchlist from the db here
            cur.execute("SELECT ComicID, ComicName, ComicYear, ComicPublisher, ComicPublished from comics")
            while True:
                watchd = cur.fetchone()
                # print ("watchd: " + str(watchd))
                if watchd is None:
                    break
                if "Present" in watchd[4] or (helpers.now()[:4] in watchd[4]):
                    # only bother with comics that are current (in the Present).
                    a_list.append(watchd[1])
                    b_list.append(watchd[2])
                    comicid.append(watchd[0])
                    pubdate.append(watchd[4])
                    # print ( "Comic:" + str(a_list[w]) + " Year: " + str(b_list[w]) )
                    # if "WOLVERINE AND THE X-MEN" in str(a_list[w]): a_list[w] = "WOLVERINE AND X-MEN"
                    lines.append(a_list[w].strip())
                    unlines.append(a_list[w].strip())
                    llen.append(a_list[w].splitlines())
                    ccname.append(a_list[w].strip())
                    tmpwords = a_list[w].split(None)
                    ltmpwords = len(tmpwords)
                    ltmp = 1
                    w += 1

        cnt = int(w - 1)
        cntback = int(w - 1)
        kp = []
        ki = []
        kc = []
        otot = 0

        logger.fdebug("You are watching for: " + str(w) + " comics")
        # print ("----------THIS WEEK'S PUBLISHED COMICS------------")
        if w > 0:
            while cnt > -1:
                lines[cnt] = lines[cnt].upper()
                # llen[cnt] = str(llen[cnt])
                logger.fdebug("looking for : " + str(lines[cnt]))
                sqlsearch = re.sub("[\_\#\,\/\:\;\.\-\!\$\%\&'\?\@]", " ", lines[cnt])
                sqlsearch = re.sub(r"\s", "%", sqlsearch)
                if "THE" in sqlsearch:
                    sqlsearch = re.sub("THE", "", sqlsearch)
                if "+" in sqlsearch:
                    sqlsearch = re.sub("\+", "%PLUS%", sqlsearch)
                logger.fdebug("searchsql: " + str(sqlsearch))
                weekly = myDB.select(
                    "SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)", [sqlsearch]
                )
                # cur.execute('SELECT PUBLISHER, ISSUE, COMIC, EXTRA, SHIPDATE FROM weekly WHERE COMIC LIKE (?)', [lines[cnt]])
                for week in weekly:
                    if week == None:
                        break
                    for nono in not_t:
                        if nono in week["PUBLISHER"]:
                            logger.fdebug("nono present")
                            break
                        if nono in week["ISSUE"]:
                            # logger.fdebug("graphic novel/tradeback detected..ignoring.")
                            break
                    for nothere in not_c:
                        if nothere in week["EXTRA"]:
                            # logger.fdebug("nothere present")
                            #......... (part of the code omitted here) .........
Example 12: _OneOffs
def _OneOffs(self, **kwargs):
    index = 0
    if 'index' in kwargs:
        index = int(kwargs['index'])
    links = []
    entries = []
    flist = []
    book = ''
    gbd = str(mylar.CONFIG.GRABBAG_DIR + '/*').encode('utf-8')
    flist = glob.glob(gbd)
    readlist = []
    for book in flist:
        issue = {}
        fileexists = True
        book = book.encode('utf-8')
        issue['Title'] = book
        issue['IssueID'] = book
        issue['fileloc'] = book
        issue['filename'] = book
        issue['image'] = None
        issue['thumbnail'] = None
        issue['updated'] = helpers.now()
        if not os.path.isfile(issue['fileloc']):
            fileexists = False
        if fileexists:
            readlist.append(issue)

    if len(readlist) > 0:
        if index <= len(readlist):
            subset = readlist[index:(index + self.PAGE_SIZE)]
            for issue in subset:
                metainfo = None
                metainfo = [{'writer': None, 'summary': ''}]
                entries.append(
                    {
                        'title': escape(issue['Title']),
                        'id': escape('comic:%s' % issue['IssueID']),
                        'updated': issue['updated'],
                        'content': escape('%s' % (metainfo[0]['summary'])),
                        'href': '%s?cmd=deliverFile&file=%s&filename=%s' % (self.opdsroot, quote_plus(issue['fileloc']), quote_plus(issue['filename'])),
                        'kind': 'acquisition',
                        'rel': 'file',
                        'author': metainfo[0]['writer'],
                        'image': issue['image'],
                        'thumbnail': issue['thumbnail'],
                    }
                )

    feed = {}
    feed['title'] = 'Mylar OPDS - One-Offs'
    feed['id'] = escape('OneOffs')
    feed['updated'] = mylar.helpers.now()
    links.append(getLink(href=self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='start', title='Home'))
    links.append(getLink(href='%s?cmd=OneOffs' % self.opdsroot, type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='self'))
    if len(readlist) > (index + self.PAGE_SIZE):
        links.append(
            getLink(href='%s?cmd=OneOffs&index=%s' % (self.opdsroot, index + self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='next'))
    if index >= self.PAGE_SIZE:
        links.append(
            getLink(href='%s?cmd=OneOffs&index=%s' % (self.opdsroot, index - self.PAGE_SIZE), type='application/atom+xml; profile=opds-catalog; kind=navigation', rel='previous'))

    feed['links'] = links
    feed['entries'] = entries
    self.data = feed
    return
Example 13: addComictoDB
#......... (part of the code omitted here) .........
            # print ("issb4dec:" + str(issb4dec))
            # if the length of decimal is only 1 digit, assume it's a tenth
            decis = str(gcdval["GCDIssue"])[issst + 1:]
            # print ("decis:" + str(decis))
            if len(decis) == 1:
                decisval = int(decis) * 10
                issaftdec = str(decisval)
            if len(decis) == 2:
                decisval = int(decis)
                issaftdec = str(decisval)
            gcd_issue = issb4dec + "." + issaftdec
            # print ("gcd_issue:" + str(gcd_issue))
            gcdis = (int(issb4dec) * 1000) + decisval
        else:
            gcdis = int(str(gcdval["GCDIssue"])) * 1000
        if gcdis == issis:
            issdate = str(gcdval["GCDDate"])
            int_issnum = int(gcdis / 1000)
            # get the latest issue / date using the date.
            if gcdval["GCDDate"] > latestdate:
                latestiss = str(issnum)
                latestdate = str(gcdval["GCDDate"])
            break
        # bb = iscnt
        bb += 1
    # print("(" + str(n) + ") IssueID: " + str(issid) + " IssueNo: " + str(issnum) + " Date" + str(issdate))
    # ---END.NEW.

    # check if the issue already exists
    iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone()

    controlValueDict = {"IssueID": issid}
    newValueDict = {
        "ComicID": comicid,
        "ComicName": comic["ComicName"],
        "IssueName": issname,
        "Issue_Number": issnum,
        "IssueDate": issdate,
        "Int_IssueNumber": int_issnum,
    }
    # only add DateAdded if the issue is not already in the database
    # (set after building newValueDict so the assignment isn't clobbered)
    if iss_exists is None:
        newValueDict["DateAdded"] = helpers.today()

    if mylar.AUTOWANT_ALL:
        newValueDict["Status"] = "Wanted"
    # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
    #     newValueDict['Status'] = "Wanted"
    else:
        newValueDict["Status"] = "Skipped"

    if iss_exists:
        # print ("Existing status : " + str(iss_exists['Status']))
        newValueDict["Status"] = iss_exists["Status"]

    myDB.upsert("issues", newValueDict, controlValueDict)
    n += 1

# logger.debug(u"Updating comic cache for " + comic['ComicName'])
# cache.getThumb(ComicID=issue['issueid'])
# logger.debug(u"Updating cache for: " + comic['ComicName'])
# cache.getThumb(ComicIDcomicid)

# check for existing files...
updater.forceRescan(comicid)

controlValueStat = {"ComicID": comicid}
newValueStat = {
    "Status": "Active",
    "LatestIssue": latestiss,
    "LatestDate": latestdate,
    "LastUpdated": helpers.now(),
}
myDB.upsert("comics", newValueStat, controlValueStat)

logger.info(u"Updating complete for: " + comic["ComicName"])

# let's check the pullist for anything at this time as well since we're here.
if mylar.AUTOWANT_UPCOMING:
    logger.info(u"Checking this week's pullist for new issues of " + str(comic["ComicName"]))
    updater.newpullcheck()

# here we grab issues that have been marked as wanted above...
results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [comicid])
if results:
    logger.info(u"Attempting to grab wanted issues for : " + comic["ComicName"])
    for result in results:
        foundNZB = "none"
        if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
            foundNZB = search.searchforissue(result["IssueID"])
            if foundNZB == "yes":
                updater.foundsearch(result["ComicID"], result["IssueID"])
else:
    logger.info(u"No issues marked as wanted for " + comic["ComicName"])

logger.info(u"Finished grabbing what I could.")
Example 14: GCDimport
#......... (part of the code omitted here) .........
            issaftdec = str(decisval)
        if len(decis) == 2:
            decisval = int(decis)
            issaftdec = str(decisval)
        if int(issaftdec) == 0:
            issaftdec = "00"
        gcd_issue = issb4dec + "." + issaftdec
        gcdis = (int(issb4dec) * 1000) + decisval
    else:
        gcdis = int(str(gcdval["GCDIssue"])) * 1000
        gcd_issue = str(gcdval["GCDIssue"])

    # get the latest issue / date using the date.
    int_issnum = int(gcdis / 1000)
    issdate = str(gcdval["GCDDate"])
    issid = "G" + str(gcdval["IssueID"])
    if gcdval["GCDDate"] > latestdate:
        latestiss = str(gcd_issue)
        latestdate = str(gcdval["GCDDate"])
    # print("(" + str(bb) + ") IssueID: " + str(issid) + " IssueNo: " + str(gcd_issue) + " Date" + str(issdate) )
    # ---END.NEW.

    # check if the issue already exists
    iss_exists = myDB.action("SELECT * from issues WHERE IssueID=?", [issid]).fetchone()

    # adjust for inconsistencies in GCD date format - some dates have ? which borks up things.
    if "?" in str(issdate):
        issdate = "0000-00-00"

    controlValueDict = {"IssueID": issid}
    newValueDict = {
        "ComicID": gcomicid,
        "ComicName": ComicName,
        "Issue_Number": gcd_issue,
        "IssueDate": issdate,
        "Int_IssueNumber": int_issnum,
    }
    # only add DateAdded if the issue is not already in the database
    # (set after building newValueDict so the assignment isn't clobbered)
    if iss_exists is None:
        newValueDict["DateAdded"] = helpers.today()

    # print ("issueid:" + str(controlValueDict))
    # print ("values:" + str(newValueDict))
    if mylar.AUTOWANT_ALL:
        newValueDict["Status"] = "Wanted"
    # elif release_dict['releasedate'] > helpers.today() and mylar.AUTOWANT_UPCOMING:
    #     newValueDict['Status'] = "Wanted"
    else:
        newValueDict["Status"] = "Skipped"

    if iss_exists:
        # print ("Existing status : " + str(iss_exists['Status']))
        newValueDict["Status"] = iss_exists["Status"]

    myDB.upsert("issues", newValueDict, controlValueDict)
    bb += 1

# logger.debug(u"Updating comic cache for " + ComicName)
# cache.getThumb(ComicID=issue['issueid'])
# logger.debug(u"Updating cache for: " + ComicName)
# cache.getThumb(ComicIDcomicid)

# check for existing files...
updater.forceRescan(gcomicid)

controlValueStat = {"ComicID": gcomicid}
newValueStat = {
    "Status": "Active",
    "LatestIssue": latestiss,
    "LatestDate": latestdate,
    "LastUpdated": helpers.now(),
}
myDB.upsert("comics", newValueStat, controlValueStat)

logger.info(u"Updating complete for: " + ComicName)

# let's check the pullist for anything at this time as well since we're here.
if mylar.AUTOWANT_UPCOMING:
    logger.info(u"Checking this week's pullist for new issues of " + str(ComicName))
    updater.newpullcheck()

# here we grab issues that have been marked as wanted above...
results = myDB.select("SELECT * FROM issues where ComicID=? AND Status='Wanted'", [gcomicid])
if results:
    logger.info(u"Attempting to grab wanted issues for : " + ComicName)
    for result in results:
        foundNZB = "none"
        if (mylar.NZBSU or mylar.DOGNZB or mylar.EXPERIMENTAL) and (mylar.SAB_HOST):
            foundNZB = search.searchforissue(result["IssueID"])
            if foundNZB == "yes":
                updater.foundsearch(result["ComicID"], result["IssueID"])
else:
    logger.info(u"No issues marked as wanted for " + ComicName)

logger.info(u"Finished grabbing what I could.")
Example 15: str
recentchk = (n_date - c_date).days
#print ("recentchk: " + str(recentchk))
if recentchk <= 55:
    lastpubdate = 'Present'
else:
    lastpubdate = str(ltmonth) + ' ' + str(ltyear)

publishfigure = str(stmonth) + ' ' + str(styear) + ' - ' + str(lastpubdate)

controlValueStat = {"ComicID": comicid}
newValueStat = {"Status": "Active",
                "LatestIssue": latestiss,
                "LatestDate": latestdate,
                "ComicPublished": publishfigure,
                "LastUpdated": helpers.now()
                }
myDB.upsert("comics", newValueStat, controlValueStat)

if mylar.CVINFO or (mylar.CV_ONLY and mylar.CVINFO):
    if not os.path.exists(os.path.join(comlocation, "cvinfo")) or mylar.CV_ONETIMER:
        with open(os.path.join(comlocation, "cvinfo"), "w") as text_file:
            text_file.write(str(comic['ComicURL']))

logger.info(u"Updating complete for: " + comic['ComicName'])

# move the files...if imported is not empty (meaning it's not from the mass importer.)
if imported is None or imported == 'None':
    pass
else: