This article collects typical usage examples of the Python function time_utils.datetime2ts. If you have been wondering what exactly datetime2ts does and how to use it, the hand-picked code examples below should help.
Fifteen code examples of the datetime2ts function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
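Before diving into the examples, here is a minimal sketch of what the time_utils helpers presumably look like, inferred from how the examples below use them. The real module may differ in details such as time-zone handling, and the DAY constant is an assumption:

import time

DAY = 24 * 3600  # seconds per day; many examples rely on a constant like this

def datetime2ts(date):
    # 'YYYY-MM-DD' string -> Unix timestamp of that day's local midnight
    return int(time.mktime(time.strptime(date, '%Y-%m-%d')))

def ts2datetime(ts):
    # Unix timestamp -> 'YYYY-MM-DD' string of the local day containing ts
    return time.strftime('%Y-%m-%d', time.localtime(ts))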
Example 1: filter_activity

def filter_activity(user_set):
    results = []
    # run_type
    if RUN_TYPE == 1:
        now_date = ts2datetime(time.time())
    else:
        now_date = RUN_TEST_TIME
    ts = datetime2ts(now_date) - DAY
    date = ts2datetime(ts)
    timestamp = datetime2ts(date)
    for user in user_set:
        over_count = 0
        for i in range(0, 7):
            ts = timestamp - DAY * i
            result = redis_activity.hget('activity_' + str(ts), str(user))
            if result:
                items_dict = json.loads(result)
                for item in items_dict:
                    weibo_count = items_dict[item]
                    if weibo_count > activity_threshold:
                        over_count += 1
        if over_count == 0:
            results.append(user)
        else:
            writer.writerow([user, 'activity'])
    return results
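The 'activity_<timestamp>' hashes read above are keyed by uid and hold JSON dicts of time slot -> weibo count; this layout is inferred from the hget/json.loads pattern here and the 96-slot loop in Example 8. Seeding one such hash for a local test might look like:

ts = datetime2ts('2013-09-07')          # midnight of the day being recorded
redis_activity.hset('activity_' + str(ts), '1234567890',
                    json.dumps({'33': 2, '34': 5}))  # slot -> weibo count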
Example 2: main

def main():
    RUN_TYPE = 0
    if RUN_TYPE == 1:
        now_ts = time.time()
    else:
        now_ts = datetime2ts(RUN_TEST_TIME)
        now_ts = datetime2ts('2013-09-02')  # fixed date used for testing
    date = ts2datetime(now_ts - DAY)
    # auto recommendation: steps 1-5
    # step 1: read top users from es_daily_rank
    top_user_set, user_dict = search_from_es(date)
    # step 2: filter out blacklisted uids
    black_user_set = read_black_user()
    subtract_user_set = top_user_set - black_user_set
    # step 3: filter out users who are already in
    subtract_user_set = list(subtract_user_set)
    candidate_results = filter_in(subtract_user_set)
    # step 4: filter by rules on ip count, repost/bereposted count and activity count
    results = filter_rules(candidate_results)
    # step 5: get sensitive users and drop them from the influence users
    sensitive_user = list(get_sensitive_user(date))
    results = set(results) - set(sensitive_user)  # influence users minus sensitive users
    new_date = ts2datetime(now_ts)
    hashname_influence = "recomment_" + new_date + "_influence"
    if results:
        for uid in results:
            #print uid
            r.hset(hashname_influence, uid, "0")
    hashname_sensitive = "recomment_" + new_date + "_sensitive"
    if sensitive_user:
        for uid in sensitive_user:
            #print "sensitive"
            r.hset(hashname_sensitive, uid, "0")
"""
Example 3: key_words_search

def key_words_search(pre, time, start_time, keyword, type='in'):
    date = start_time
    index_name = pre + start_time
    # skip forward day by day until an existing index is found; each skipped
    # day consumes one day of the search window held in `time`
    while not es.indices.exists(index=index_name):
        new_time = datetime2ts(date) + DAY
        date = ts2datetime(new_time)
        index_name = pre + date
        time -= 1
    uid_set = set()
    for i in range(time):
        print index_name
        query = {"query": {"bool": {"must": [{"prefix": {"text.text": keyword}}], "must_not": [], "should": []}}, "size": MAX_ITEMS, "sort": [], "facets": {}, "fields": ['uid']}
        try:
            temp = es.search(index=index_name, doc_type='text', body=query)
            result = temp['hits']['hits']
            print "Fetch " + str(len(result))
            for item in result:
                uid_set.add(item['fields']['uid'][0].encode("utf-8"))
        except Exception, e:
            print e
            raise Exception('user_list failed!')
        # advance to the next day's index
        new_time = datetime2ts(date) + DAY
        date = ts2datetime(new_time)
        index_name = pre + date
    return uid_set
Example 4: get_interval_count

def get_interval_count(topic, start_ts, end_ts):
    results = [0]
    ts_list = []
    #unit = 900
    #during = Day
    during = interval_count_during
    start_ts = datetime2ts(ts2datetime(start_ts))  # floor to the start of its day
    ts_list.append(start_ts)
    #end_ts = datetime2ts(ts2datetime(end_ts))
    # if end_ts is not on a day boundary, round it up to the next midnight
    print 'before deal end_ts:', ts2date(end_ts)
    if end_ts - datetime2ts(ts2datetime(end_ts)) != 0:
        end_ts = datetime2ts(ts2datetime(end_ts)) + 3600 * 24
    print 'get_interval_count start_ts:', ts2date(start_ts)
    print 'get_interval_count end_ts:', ts2date(end_ts)
    windowsize = (end_ts - start_ts) / Day  # Day and unit are presumably module-level constants
    interval = (end_ts - start_ts) / during
    for i in range(interval, 0, -1):
        begin_ts = end_ts - during * i
        over_ts = begin_ts + during
        ts_list.append(over_ts)
        items = db.session.query(PropagateCountNews).filter(
            PropagateCountNews.topic == topic,
            PropagateCountNews.end <= over_ts,
            PropagateCountNews.end > begin_ts,
            PropagateCountNews.range == unit).all()
        if items:
            result = Merge_propagate(items)
        else:
            result = 0
        results.append(float(result))
    return ts_list, results
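The datetime2ts(ts2datetime(ts)) round trip used above is the recurring idiom for flooring a timestamp to midnight; a small demonstration under the time_utils sketch given earlier:

ts = datetime2ts('2013-09-02') + 5 * 3600 + 42    # 05:00:42 on 2013-09-02
day_start = datetime2ts(ts2datetime(ts))          # floored back to midnight
assert ts - day_start == 5 * 3600 + 42
if ts - day_start != 0:                           # round *up*, as above
    end_ts = day_start + 3600 * 24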
Example 5: weibo_sort_interface

def weibo_sort_interface(username, time, sort_scope, sort_norm, arg, st, et, task_number, number):
    task_number = int(task_number)
    print "user_interface:", number
    weibo_list = []
    during = (datetime2ts(et) - datetime2ts(st)) / DAY + 1
    # choose the statistics window; test the wider span first so that the
    # 30-day branch is actually reachable
    time = 1
    if during > 16:
        time = 30
    elif during > 3:
        time = 7
    query_body = {
        "query": {
            "terms": {
                "status": [0, -1]
            }
        }
    }
    if sort_scope == 'all_limit_keyword':
        running_number = es_weibo_portrait.count(index=WEIBO_RANK_KEYWORD_TASK_INDEX, doc_type=WEIBO_RANK_KEYWORD_TASK_TYPE, body=query_body)['count']
        if running_number > task_number - 1:
            return "more than limit"
        search_id = add_task(username, type="keyword", during=during, st=st, et=et, arg=arg, sort_norm=sort_norm, sort_scope=sort_scope, time=time, number=number)
        # deal with the offline task
        return {"flag": True, "search_id": search_id}
    elif sort_scope == 'all_nolimit':
        pass
    return weibo_list
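The during expression above counts days inclusively; with Python 2 integer division and the DAY constant from the earlier sketch:

st, et = '2013-09-01', '2013-09-07'
during = (datetime2ts(et) - datetime2ts(st)) / DAY + 1   # == 7 days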
Example 6: scan_mapper

def scan_mapper(pre, sen_pre, r):
    if RUN_TYPE:
        ts = datetime2ts(ts2datetime(time.time() - DAY))
    else:
        ts = datetime2ts('2013-09-01')
    ts = str(ts)
    hash_name = pre + ts
    sen_hash_name = sen_pre + ts
    cursor = 0
    count = 0
    tb = time.time()
    while 1:
        re_scan = r.hscan(hash_name, cursor, count=1000)
        cursor = re_scan[0]
        ip_dict = re_scan[1]
        uid_list = ip_dict.keys()
        if uid_list:
            r.lpush('act_uid_list', json.dumps(uid_list))
            count += len(uid_list)
            ts = time.time()
            print '%s : %s' % (count, ts - tb)
            tb = ts
        if cursor == 0:
            print count
            break
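The scan loop above follows redis-py's HSCAN contract: hscan(name, cursor, count=n) returns a (next_cursor, {field: value}) pair, and the iteration is complete when the server returns cursor 0. The same pattern isolated into a reusable sketch (connection details and names are placeholders):

import json
import redis

r = redis.StrictRedis()  # placeholder connection

def drain_hash(hash_name, queue_name, batch=1000):
    cursor = 0
    total = 0
    while 1:
        cursor, chunk = r.hscan(hash_name, cursor, count=batch)
        if chunk:
            # push this batch of field names (uids) onto a work queue
            r.lpush(queue_name, json.dumps(chunk.keys()))
            total += len(chunk)
        if cursor == 0:
            return total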
Example 7: add_task

def add_task(user_name, type="keyword", range="all", pre='flow_text_', during='1', start_time='2013-09-07', end_time='2013-09-07', keyword='hello,world', sort_norm='bci', sort_scope='in_limit_keyword', time=7, isall=False, number=100):
    time_now = int(TIME.time())
    task_id = user_name + "-" + str(time_now)
    tmp_list = keyword.split(',')
    keyword_list = []
    for item in tmp_list:
        if item:
            keyword_list.append(item)
    body_json = {
        'submit_user': user_name,
        'keyword': json.dumps(keyword_list),
        'keyword_string': "&".join(keyword_list),
        'submit_time': ts2datetime(time_now),
        'create_time': time_now,
        'end_time': datetime2ts(end_time),
        'search_type': type,
        'status': 0,
        'range': range,
        'user_ts': user_name + '-' + str(time_now),
        'pre': pre,
        'during': during,
        'start_time': datetime2ts(start_time),
        'sort_norm': sort_norm,
        'sort_scope': sort_scope,
        'time': time,
        'isall': isall,
        'number': number
    }
    es.index(index=USER_RANK_KEYWORD_TASK_INDEX, doc_type=USER_RANK_KEYWORD_TASK_TYPE, id=task_id, body=body_json)
    return body_json["user_ts"]
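A hypothetical call, using the defaults from the snippet above. Note that the returned user_ts ('<user>-<submit ts>') is the same string used as the Elasticsearch document id, so the offline worker can look the task up directly:

search_id = add_task('admin',
                     type='keyword',
                     start_time='2013-09-01',
                     end_time='2013-09-07',
                     keyword='apple,banana',
                     sort_norm='bci')
# es.get(index=USER_RANK_KEYWORD_TASK_INDEX,
#        doc_type=USER_RANK_KEYWORD_TASK_TYPE, id=search_id)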
Example 8: get_activity_time

def get_activity_time(uid_list):
    results = {}
    now_ts = time.time()
    now_date = ts2datetime(now_ts)
    # run_type
    if RUN_TYPE == 1:
        timestamp = datetime2ts(now_date)
    else:
        timestamp = datetime2ts("2013-09-08")
    activity_list_dict = {}  # {uid:[activity_list], uid:[]}
    for i in range(1, WEEK + 1):
        ts = timestamp - DAY * i
        if WORK_TYPE != 0:
            r_result = redis_activity.hmget('activity_' + str(ts), uid_list)
        else:
            r_result = []
            index_name = "activity_" + str(ts2datetime(ts))
            exist_bool = es_cluster.indices.exists(index=index_name)
            if exist_bool:
                es_results = es_cluster.mget(index=index_name, doc_type="activity", body={"ids": uid_list})["docs"]
                for item in es_results:
                    if item['found']:
                        r_result.append(item['_source']['activity_dict'])
                    else:
                        r_result.append(json.dumps({}))
            else:
                r_result = [json.dumps(dict())] * len(uid_list)
        if r_result:
            for j in range(0, len(uid_list)):
                uid = uid_list[j]
                if uid not in activity_list_dict:
                    activity_list_dict[uid] = [0 for i in range(0, 96)]
                user_r_result = r_result[j]
                if user_r_result:
                    user_activity_dict = json.loads(user_r_result)
                    for i in range(0, 96):  # 96 quarter-hour slots per day
                        try:
                            count = user_activity_dict[str(i)]
                        except:
                            count = 0
                        activity_list_dict[uid].append(count)
    for uid in uid_list:
        activity_list = activity_list_dict[uid]
        statusnum = sum(activity_list)
        signal = np.array(activity_list)
        fftResult = np.abs(np.fft.fft(signal)) ** 2
        n = signal.size
        freq = np.fft.fftfreq(n, d=1)
        i = 0
        max_val = 0
        max_freq = 0
        # track the dominant strictly-positive frequency in the power spectrum
        for val in fftResult:
            if val > max_val and freq[i] > 0:
                max_val = val
                max_freq = freq[i]
            i += 1
        results[uid] = {'statusnum': statusnum, 'activity_time': math.log(max_freq + 1)}
    return results
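The FFT step rewards users with a regular posting rhythm: with 96 quarter-hour bins per day, a user who posts in the same slot every day puts the dominant positive frequency at 1/96, i.e. one cycle per day. A synthetic check (illustrative data, not from the source project):

import numpy as np

signal = np.zeros(96 * 7)            # one week at 96 quarter-hour bins/day
signal[40::96] = 5                   # five posts in the same slot every day
power = np.abs(np.fft.fft(signal)) ** 2
freq = np.fft.fftfreq(signal.size, d=1)
pos = freq > 0
print freq[pos][np.argmax(power[pos])]   # ~0.0104 == 1.0 / 96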
Example 9: filter_activity

def filter_activity(user_set):
    results = []
    now_date = ts2datetime(time.time())
    # test
    now_date = '2013-09-08'
    ts = datetime2ts(now_date) - 24 * 3600
    date = ts2datetime(ts)
    #print 'date:', date
    timestamp = datetime2ts(date)
    for user in user_set:
        over_count = 0
        for i in range(0, 7):
            ts = timestamp - 3600 * 24 * i
            result = r_cluster.hget('activity_' + str(ts), str(user))
            if result:
                items_dict = json.loads(result)
                for item in items_dict:
                    weibo_count = items_dict[item]
                    if weibo_count > activity_threshold:
                        over_count += 1
        if over_count == 0:
            results.append(user)
        else:
            writer.writerow([user, 'activity'])
    print 'after filter activity:', len(results)
    return results
Example 10: update_day_sensitive

def update_day_sensitive(uid_list):
    results = {}
    count = 0
    for uid in uid_list:
        results[uid] = {"sensitive": 0, 'sensitive_string': "", 'sensitive_dict': json.dumps({})}
    all_results = {}
    now_ts = time.time()
    if RUN_TYPE == 1:
        now_date_ts = datetime2ts(ts2datetime(now_ts))
    else:
        now_date_ts = datetime2ts('2013-09-02')
    today_sensitive_dict = {}
    sensitive_results = redis_cluster.hmget("sensitive_" + str(now_date_ts), uid_list)
    for item in sensitive_results:
        if not item:
            count += 1
            continue
        print type(item)
        uid = uid_list[count]
        item = json.loads(item)
        sensitive_index = 0
        sensitive_words_dict = {}
        # the inner loop must not reuse `count`, which indexes uid_list
        for word, word_count in item.iteritems():
            tmp_stage = r_sensitive.hget("sensitive_words", word)
            if tmp_stage:
                tmp = json.loads(tmp_stage)
                sensitive_index += sensitive_score_dict[str(tmp[0])] * word_count
        sensitive_words_string = "&".join(item.keys())
        results[uid] = {'sensitive': sensitive_index, "sensitive_words_string": sensitive_words_string, "sensitive_words_dict": item}
        count += 1
    return results
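The scoring above relies on two structures that the snippet only implies: r_sensitive maps each word to a JSON list whose first element is a stage, and sensitive_score_dict maps stage to weight. A worked toy example with hypothetical values:

sensitive_score_dict = {'1': 1, '2': 2, '3': 4}     # hypothetical weights
word_stages = {'wordA': [1], 'wordB': [3]}          # stand-in for r_sensitive
day_counts = {'wordA': 2, 'wordB': 1}               # one uid's counts for the day
score = sum(sensitive_score_dict[str(word_stages[w][0])] * c
            for w, c in day_counts.items())
print score                                          # 1*2 + 4*1 == 6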
Example 11: update_day_sensitive

def update_day_sensitive(uid_list):
    results = {}
    for uid in uid_list:
        results[uid] = {"sensitive": 0, "sensitive_string": "", "sensitive_dict": json.dumps({})}
    all_results = {}
    now_ts = time.time()
    if RUN_TYPE == 1:
        now_date_ts = datetime2ts(ts2datetime(now_ts))
    else:
        now_date_ts = datetime2ts("2013-09-03")
    today_sensitive_dict = {}
    sensitive_results = redis_cluster.hmget("sensitive_" + str(now_date_ts), uid_list)
    for item in sensitive_results:
        for uid, words_dict in item.iteritems():
            sensitive_index = 0
            sensitive_words_dict = {}
            if words_dict:
                # keep the dict form while scoring; serialize only at the end
                sensitive_words_dict = words_dict
                for word, count in words_dict.iteritems():
                    tmp_stage = r_sensitive.hget("sensitive_words", word)
                    if tmp_stage:
                        tmp = json.loads(tmp_stage)
                        sensitive_index += sensitive_score_dict[tmp[0]] * count
            sensitive_words_string = "&".join(sensitive_words_dict.keys())
            results[uid] = {
                "sensitive": sensitive_index,
                "sensitive_words_string": sensitive_words_string,
                "sensitive_words_dict": json.dumps(sensitive_words_dict),
            }
    return results
Example 12: update_day_hashtag

def update_day_hashtag(uid_list):
    results = {}
    all_results = {}
    now_ts = time.time()
    if RUN_TYPE == 1:
        now_date_ts = datetime2ts(ts2datetime(now_ts))
    else:
        now_date_ts = datetime2ts("2013-09-02")
    for i in range(WEEK, 0, -1):
        ts = now_date_ts - DAY * i
        count = 0
        hashtag_results = redis_cluster.hmget("hashtag_" + str(ts), uid_list)
        for uid in uid_list:
            if uid not in results:
                results[uid] = {}
            hashtag_item = hashtag_results[count]
            if hashtag_item:
                hashtag_dict = json.loads(hashtag_item)
            else:
                hashtag_dict = {}
            for hashtag in hashtag_dict:
                try:
                    results[uid][hashtag] += 1
                except:
                    results[uid][hashtag] = 1
            count += 1
    for uid in uid_list:
        user_hashtag_dict = results[uid]
        hashtag_string = "&".join(user_hashtag_dict.keys())
        all_results[uid] = {"hashtag_string": hashtag_string, "hashtag_dict": json.dumps(user_hashtag_dict)}
    return all_results
Example 13: scan_mapper

def scan_mapper():
    if RUN_TYPE:
        ts = datetime2ts(ts2datetime(time.time() - DAY))
    else:
        ts = datetime2ts('2016-05-14')
    ts = str(ts)
    hash_name = sen_pre_ip + ts
    cursor = 0
    count = 0
    tb = time.time()
    while 1:
        re_scan = redis_ip.hscan(hash_name, cursor, count=1000)
        cursor = re_scan[0]
        ip_dict = re_scan[1]
        uid_list = ip_dict.keys()
        if uid_list:
            redis_ip.lpush('sensitive_ip_uid_list', json.dumps(uid_list))
            count += len(uid_list)
            ts = time.time()
            print '%s : %s' % (count, ts - tb)
            tb = ts
        if cursor == 0:
            print count
            break
Example 14: key_words_search

def key_words_search(search_type, pre, during, start_time, keyword, search_key='', sort_norm='', sort_scope='', time=1, isall=False):
    keywords = keyword.split(",")
    should = []
    for key in keywords:
        if search_type == "hashtag":
            should.append({"prefix": {"text.text": "#" + key + "#"}})
        else:
            should.append({"prefix": {"text.text": key}})
    date = start_time
    index_name = pre + start_time
    # skip forward day by day until an existing index is found; each skipped
    # day consumes one day of the search window held in `during`
    while not es_9206.indices.exists(index=index_name):
        new_time = datetime2ts(date) + DAY
        date = ts2datetime(new_time)
        index_name = pre + date
        during -= 1
    uid_set = set()
    for i in range(during):
        print index_name
        query = {"query": {"bool": {"must": [], "must_not": [], "should": should}}, "size": MAX_ITEMS, "sort": [], "facets": {}, "fields": ['uid']}
        try:
            temp = es_9206.search(index=index_name, doc_type='text', body=query)
            result = temp['hits']['hits']
            print "Fetch " + str(len(result))
            for item in result:
                uid_set.add(item['fields']['uid'][0].encode("utf-8"))
        except Exception, e:
            print e
            raise Exception('user_list failed!')
        # advance to the next day's index
        new_time = datetime2ts(date) + DAY
        date = ts2datetime(new_time)
        index_name = pre + date
    return uid_set
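Both key_words_search variants walk daily indices named <pre><YYYY-MM-DD>; the walk itself, isolated (prefix borrowed from the default in Example 7):

date = '2013-09-01'
pre = 'flow_text_'
for _ in range(3):
    print pre + date                          # flow_text_2013-09-01, -02, -03
    date = ts2datetime(datetime2ts(date) + DAY)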
Example 15: filter_activity

def filter_activity(user_set):
    results = []
    now_date = ts2datetime(time.time())
    now_date = '2013-09-08'  # test override
    ts = datetime2ts(now_date) - 24 * 3600
    date = ts2datetime(ts)
    timestamp = datetime2ts(date)
    date = date.replace('-', '')
    for user in user_set:
        over_count = 0
        for i in range(0, 7):
            ts = timestamp - 3600 * 24 * i
            result = r_cluster.hget('activity_' + str(ts), str(user))
            if result:
                item_dict = json.loads(result)
                sorted_dict = sorted(item_dict.iteritems(), key=lambda asd: asd[1], reverse=True)
                if sorted_dict[0][1] > activity_threshold:
                    over_count = 1
        if over_count == 0:
            results.append(user)
        else:
            writer.writerow([user, 'activity'])
    print 'after filter activity: ', len(results)
    return results