本文整理汇总了Python中util.timestamp函数的典型用法代码示例。如果您正苦于以下问题:Python timestamp函数的具体用法?Python timestamp怎么用?Python timestamp使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了timestamp函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: list_trash
def list_trash(cookie, tokens, path="/", page=1, num=100):
    """Return the recycle-bin listing as a parsed JSON dict, or None.

    path - absolute directory path, defaults to the root
    page - page number, defaults to the first page
    num  - number of entries per page, defaults to 100

    Files in the recycle bin are kept for 10 days before being purged
    automatically and do not count against the user's storage quota.
    """
    # Assemble the query string piece by piece; the two "t" parameters
    # (timestamp, then latency) mirror what the web client sends.
    pieces = [const.PAN_API_URL,
              "recycle/list?channel=chunlei&clienttype=0&web=1"]
    pieces += ["&num=", str(num)]
    pieces += ["&t=", util.timestamp()]
    pieces += ["&dir=", encoder.encode_uri_component(path)]
    pieces += ["&t=", util.latency()]
    pieces.append("&order=time&desc=1")
    pieces += ["&_=", util.timestamp()]
    pieces += ["&bdstoken=", tokens["bdstoken"]]
    req = net.urlopen("".join(pieces),
                      headers={"Cookie": cookie.header_output()})
    if not req:
        return None
    return json.loads(req.data.decode())
示例2: new_from_url
def new_from_url ( url ):
    # Create, populate, and persist a Feed record from an RSS/Atom URL.
    # NOTE: Python 2 code (dict.has_key, "except X, e" syntax).
    # NOTE(review): the outer try's except handler is missing from this
    # excerpt — the block appears truncated; confirm against the full
    # source before relying on its error behavior.
    try:
        d = feedparser.parse( url )
        # feedparser omits the 'status' key entirely when the fetch
        # itself failed (e.g. unresolvable host).
        if not d.has_key( 'status' ):
            raise Exception( 'Error fetching content. Bad URL?' )
        # Accept 200 OK plus both redirect statuses.
        if d.status != 200 and d.status != 301 and d.status != 302:
            raise Exception( d.debug_message)
        if not d.feed.has_key( 'title' ):
            raise Exception( "Content does not appear to be an RSS feed." )
        # Conditional-GET validators: fall back to '' / "now" when the
        # server did not supply them.
        if d.has_key( 'etag' ):
            etag = d.etag
        else:
            etag = ''
        if d.has_key( 'modified' ):
            modified = d['modified']
        else:
            modified = datetime.now().timetuple()
        feed = Feed()
        feed.url = d.href  # final URL after any redirects
        feed.title = d.feed.title
        feed.link = d.feed.link
        try:
            feed.description = d.feed.description
        except AttributeError, e:
            # Description is optional in the feed; leave it unset.
            pass
        feed.etag = etag
        feed.modified = util.timestamp( modified )
        feed.added = util.timestamp()
        feed.insert()
        return feed
示例3: list_share_path
def list_share_path(cookie, tokens, uk, path, share_id, page):
    '''List the files inside one directory of a user's share.

    uk       - user key
    path     - the shared directory
    share_id - ID of the shared entry

    Returns the parsed JSON response, or None when the request fails.
    '''
    url = (const.PAN_URL
           + 'share/list?channel=chunlei&clienttype=0&web=1&num=100'
           + '&t=' + util.timestamp()
           + '&page=' + str(page)
           + '&dir=' + encoder.encode_uri_component(path)
           + '&t=' + util.latency()
           + '&shareid=' + share_id
           + '&order=time&desc=1'
           + '&uk=' + uk
           + '&_=' + util.timestamp()
           + '&bdstoken=' + tokens['bdstoken'])
    headers = {
        'Cookie': cookie.header_output(),
        'Referer': const.SHARE_REFERER,
    }
    req = net.urlopen(url, headers=headers)
    if not req:
        return None
    return json.loads(req.data.decode())
示例4: list_trash
def list_trash(cookie, tokens, path='/', page=1, num=100):
    '''Fetch the recycle-bin file listing.

    path - absolute path of the directory, root by default
    page - page number, first page by default
    num  - how many entries per page, 100 by default

    Entries stay in the recycle bin for 10 days before being cleared
    automatically; they do not consume the user's storage space.
    '''
    # The duplicated "t" parameter (timestamp then latency) reproduces
    # the web client's request shape.
    query = [
        const.PAN_API_URL,
        'recycle/list?channel=chunlei&clienttype=0&web=1',
        '&num=', str(num),
        '&t=', util.timestamp(),
        '&dir=', encoder.encode_uri_component(path),
        '&t=', util.latency(),
        '&order=time&desc=1',
        '&_=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
    ]
    response = net.urlopen(''.join(query),
                           headers={'Cookie': cookie.header_output()})
    if response:
        return json.loads(response.data.decode())
    return None
示例5: list_share
def list_share(cookie, tokens, path='/', page=1, num=100):
    '''List the files the user has already shared.

    path - which directory to inspect, root by default.
    page - page number, first page by default.
    num  - how many share records to fetch at once, 100 by default.

    Returns the parsed JSON response as a dict, or None when the
    request fails.
    '''
    url = ''.join([
        const.PAN_URL,
        'share/record?channel=chunlei&clienttype=0&web=1',
        '&num=', str(num),
        '&t=', util.timestamp(),
        '&page=', str(page),
        '&dir=', encoder.encode_uri_component(path),
        '&t=', util.latency(),
        # Fixed typo: was 'order=tme'; 'time' is the sort key used by
        # every other share/list request in this module.
        '&order=time&desc=1',
        '&_=', util.timestamp(),
        '&bdstoken=', tokens['bdstoken'],
    ])
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Referer': const.SHARE_REFERER,
    })
    if req:
        content = req.data
        return json.loads(content.decode())
    else:
        return None
示例6: run
def run ( config_path ):
config = ConfigParser.RawConfigParser()
config.read( config_path )
conn = sqlite3.connect( config.get( 'Database', 'path' ) )
cursor = conn.cursor()
while True:
try:
res = cursor.execute( 'SELECT [id], [url], [modified], [etag], [interval] FROM [feeds] WHERE [checked] + [interval] < ?', ( util.timestamp(), ) )
for row in res:
try:
d = feedparser.parse( row[1], etag=row[3], modified=row[2] )
if not d.has_key( 'status' ):
raise Exception( 'Error fetching content. Bad URL?' )
if d.status != 200 and d.status != 301 and d.status != 302 and d.status != 304:
raise Exception( d.debug_message)
if not d.feed.has_key( 'title' ):
raise Exception( "Content does not appear to be an RSS feed." )
except Exception, e:
conn.execute( "INSERT INTO [log] ( [logged], [level], [message] ) VALUES ( ?, ?, ? )", ( util.timestamp(), 'ERROR', 'Error fetching feed #' + str( row[0] ) + ": " + str( e ) ) )
conn.execute( "UPDATE [feeds] SET [checked] = ? WHERE [id] = ?", ( util.timestamp() - int( row[4] / 2 ), row[0] ) )
continue
try:
if d.status == 304:
conn.execute( "UPDATE [feeds] SET [checked] = ? WHERE [id] = ?", ( util.timestamp(), row[0] ) )
else:
count = 0
for entry in d.entries:
result = conn.execute( "SELECT COUNT(*) FROM messages WHERE [feed_id] = ? AND [uuid] = ?", ( row[0], entry.id ) )
if 0 != result[0][0]:
break
conn.execute( "INSERT INTO messages ( [feed_id], [fetched], [posted], [title], [link], [uuid], [content] ) VALUES ( ?, ?, ?, ?, ?, ?, ? )", ( row[0], util.timestamp(), util.timestamp( entry.date_parsed ), entry.title, entry.link, entry.id, entry.content) )
if d.has_key( 'etag' ):
etag = d.etag
else:
etag = ''
if d.has_key( 'modified' ):
modified = modified
else:
modified = datetime.now().timetuple()
conn.execute( "UPDATE [feeds] SET [checked] = ?, [modified] = ?, [etag] = ? WHERE [id] = ?", ( util.timestamp(), modified, etag, row[0] ) )
conn.execute( "INSERT INTO [log] ( [logged], [level], [message] ) VALUES ( ?, ?, ? )", ( util.timestamp(), 'DEBUG', 'Updated feed #' + str( row[0] ) + " with " + count + " new entries." ) )
except Exception, e:
conn.execute( "INSERT INTO [log] ( [logged], [level], [message] ) VALUES ( ?, ?, ? )", ( util.timestamp(), 'ERROR', 'Error parsing feed #' + str( row[0] ) + ": " + str( e ) ) )
time.sleep( 30 ) # Arbitrary...
示例7: thermal_detection_start
def thermal_detection_start(self):
    """Start the camera recording and stamp when it began."""
    self.cameraThread.start_recording()
    metadata = {
        "__type__": "thermalVideoRecording",
        "recordingDateTime": util.datetimestamp(),
        "recordingTime": util.timestamp(),
    }
    self.data = metadata
示例8: __init__
def __init__(self, name = None):
    """Create the channel."""
    self.name = name           # channel name (may be None)
    self.time_c = timestamp()  # creation time
    self.topic = ''
    self.users = []
示例9: get_bduss
def get_bduss(cookie, token, username, password):
    '''Obtain the crucial login cookie granting final access authorization.

    token    - token value obtained from get_token().
    cookie   - the BAIDUID cookie.
    username - account name
    password - plain-text password
    @return a list of the authorization cookies needed to log in to
            *.baidu.com.
    '''
    url = const.PASSPORT_URL + '?login'
    # Form body is assembled from pre-encoded fragments.
    fields = [
        'staticpage=http%3A%2F%2Fwww.baidu.com%2Fcache%2Fuser%2Fhtml%2Fv3Jump.html',
        '&charset=utf-8',
        '&token=', token,
        '&tpl=mn&apiver=v3',
        '&tt=', util.timestamp(),
        '&codestring=&safeflg=0&u=https%3A%2F%2Fpassport.baidu.com%2F',
        '&isPhone=false&quick_user=0',
        #'&loginmerge=true&logintype=basicLogin',
        '&usernamelogin=1&spligin=rate',
        '&username=', username,
        '&password=', password,
        '&verifycode=&mem_pass=on',
        '&ppui_logintime=', get_ppui_logintime(),
        '&callback=parent.bd__pcbs__cb',
    ]
    body = ''.join(fields)
    req = net.urlopen(url, headers={
        'Cookie': cookie.header_output(),
        'Content-type': const.CONTENT_FORM,
    }, data=body.encode())
    return req.headers.get_all('Set-Cookie')
示例10: list_share
def list_share(cookie, tokens, uk, page=1):
    """List every file a given user has already shared.

    uk   - user key
    page - page number, first page by default.
    num  - how many share records to fetch at once, 100 by default.
    """
    num = 100
    start = 100 * (page - 1)
    url = "".join([
        const.PAN_URL,
        "pcloud/feed/getsharelist?",
        "&t=", util.timestamp(),
        "&categor=0&auth_type=1&request_location=share_home",
        "&start=", str(start),
        "&limit=", str(num),
        "&query_uk=", str(uk),
        "&channel=chunlei&clienttype=0&web=1",
        "&bdstoken=", tokens["bdstoken"],
    ])
    headers = {
        "Cookie": cookie.header_output(),
        "Referer": const.SHARE_REFERER,
    }
    req = net.urlopen(url, headers=headers)
    if not req:
        return None
    return json.loads(req.data.decode())
示例11: extend
def extend(self, iterable):
    """Append multiple records to this array."""
    ts = timestamp()
    # One Column per value, all stamped with the same write time.
    columns = [Column(value, "", ts) for value in iterable]
    cfmap = {self.key.column_family: columns}
    self._get_cas().batch_insert(self.key.keyspace, self.key.key, cfmap,
                                 self.consistency)
示例12: cloud_query_task
def cloud_query_task(cookie, tokens, task_ids):
    """Query offline-download tasks for progress/completion details.

    Call cloud_list_task() first to enumerate the current tasks, then
    use this to fetch the details of specific ones.

    task_ids - a list with at least one task_id; each id is a string

    Returns the parsed JSON response, or None on failure.
    """
    url = (const.PAN_URL
           + "rest/2.0/services/cloud_dl?method=query_task&app_id=250528"
           + "&bdstoken=" + tokens["bdstoken"]
           + "&task_ids=" + ",".join(task_ids)
           + "&t=" + util.timestamp()
           + "&channel=chunlei&clienttype=0&web=1")
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if not req:
        return None
    return json.loads(req.data.decode())
示例13: insert_post
def insert_post(forum_id, thread_id, message, user_id, first_post=False, timestamp=None):
    # Insert a post row, then bump the post counters of its forum and
    # thread. 'timestamp' defaults to "now" via util.timestamp().
    #
    # SECURITY NOTE(review): the SQL below is built by string
    # concatenation, so 'message' (and any non-numeric argument) is a
    # SQL-injection vector. query_db should be called with placeholder
    # parameters instead — confirm whether it supports them.
    if timestamp is None:
        timestamp = str(util.timestamp())
    query_db('INSERT INTO posts (author, thread, message, time, first_post) VALUES\
        (' + str(user_id) + ',' + str(thread_id) + ',"' + message + '",' + timestamp +',' + str(b2i(first_post)) + ')')
    query_db('UPDATE forums SET post_count = post_count + 1 WHERE id = ' + str(forum_id))
    query_db('UPDATE threads SET post_count = post_count + 1 WHERE id = ' + str(thread_id))
示例14: cloud_query_sinfo
def cloud_query_sinfo(cookie, tokens, source_path):
    """Fetch info about a torrent stored in the cloud drive, such as
    the file names and sizes it contains.

    source_path - absolute path of the BT seed (.torrent) file.

    Returns the parsed JSON response, or None on failure.
    """
    query = [
        "rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1",
        "&method=query_sinfo&app_id=250528",
        "&bdstoken=", tokens["bdstoken"],
        "&source_path=", encoder.encode_uri_component(source_path),
        "&type=2",
        "&t=", util.timestamp(),
    ]
    req = net.urlopen(const.PAN_URL + "".join(query),
                      headers={"Cookie": cookie.header_output()})
    if req:
        return json.loads(req.data.decode())
    return None
示例15: cloud_list_task
def cloud_list_task(cookie, tokens, start=0):
    """List the current offline-download tasks, 50 at a time.

    start - index of the first task to return, counting from 0.

    Returns the parsed JSON response, or None on failure.
    """
    url = (const.PAN_URL
           + "rest/2.0/services/cloud_dl?channel=chunlei&clienttype=0&web=1"
           + "&bdstoken=" + tokens["bdstoken"]
           + "&need_task_info=1&status=255"
           + "&start=" + str(start)
           + "&limit=50&method=list_task&app_id=250528"
           + "&t=" + util.timestamp())
    req = net.urlopen(url, headers={"Cookie": cookie.header_output()})
    if not req:
        return None
    return json.loads(req.data.decode())