本文整理汇总了Python中lib.dbtools.connections.Connections.getClient方法的典型用法代码示例。如果您正苦于以下问题:Python Connections.getClient方法的具体用法?Python Connections.getClient怎么用?Python Connections.getClient使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类lib.dbtools.connections.Connections的用法示例。
在下文中一共展示了Connections.getClient方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: daily_buy_sell
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def daily_buy_sell(security_id, start, end):
db = Connections.getClient("Mars")["Mars"]["AlgoOrders"]
deals = Connections.getClient("Mars")["Mars"]["OrderDeals"]
#result_deals = deals.aggregate([{"$match": {"cheuvreux_secid": security_id, "TransactTime":{"$gte": start, "$lte": end}, "LastMkt": "BLNK"}}, {"$project": {"LastMkt": 1, "OrderQty": 1, "_id": 0}} ])
#deal_list = deals.find({"cheuvreux_secid": security_id, "TransactTime":{"$gte": start, "$lte": end}, "LastMkt": "BLNK"})
results = []
day = start
while day <= end:
result_orders = db.aggregate([{"$match": {"cheuvreux_secid": security_id, "TransactTime":{"$gte": day, "$lt": day + timedelta(days=1)}}}, {"$project": {"Side": 1, "OrderQty": 1, "_id": 0}}])
#print result_orders
buy_volume = sum([x["OrderQty"] if x["Side"] == "1" else 0 for x in result_orders["result"]])
sell_volume = sum([x["OrderQty"] if x["Side"] == "2" else 0 for x in result_orders["result"]])
result_deals = deals.aggregate([{"$match": {"cheuvreux_secid": security_id, "TransactTime":{"$gte": day, "$lte": day + timedelta(days=1)}, "LastMkt": "BLNK"}}, {"$project": {"LastMkt": 1, "LastShares": 1, "_id": 0}} ])
blink_volume = sum([x["LastShares"] for x in result_deals["result"]])
print day
print blink_volume
print buy_volume, sell_volume
results.append({"date": day, "buy_volume": buy_volume, "sell_volume": sell_volume, "blink_volume": blink_volume})
day = day + timedelta(days = 1)
df = {"date": [k["date"] for k in results],
"buy_volume" : [x["buy_volume"] for x in results],
"sell_volume" : [x["sell_volume"] for x in results],
"blink_volume" : [x["blink_volume"] for x in results]}
return pd.DataFrame(df, index = df["date"])
示例2: top_traded
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def top_traded():
map_func = Code ("""function() {
if (this.occ_nb_replace == 0){
emit(this.cheuvreux_secid, this.turnover * this.rate_to_euro)
}
}""")
reduce_func = Code (""" function(sec_id, quantity) {
return Array.sum(quantity)
}
""")
Connections.change_connections("production")
db = Connections.getClient("MARS")["Mars"]["AlgoOrders"]
result = db.map_reduce(map_func, reduce_func, "my_result")
turnover_executed = {}
for doc in result.find():
if doc[u"_id"] is not None and doc[u"value"] > 0:
turnover_executed[int(doc[u"_id"])] = int(doc[u"value"])
sorted_x = sorted(turnover_executed.iteritems(), key= lambda x: x[1], reverse = True)
sec_ids = []
keys = []
values = []
for i in range(20):
sec_ids.append(sorted_x[i][0])
keys.append(convert_symbol(source = "security_id", dest = "security_name", value = sorted_x[i][0])[0][0])
values.append(sorted_x[i][1])
print convert_symbol(source = "security_id", dest = "security_name", value = sorted_x[i][0])
for i in range(20):
print sorted_x[i][1]
figure, ax = plt.subplots(1, 1)
ax.bar(range(20), values)
plt.show()
示例3: upload_file
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def upload_file(filename = 'C:\st_sim\projects\FixedIncomeReferential\Datas.xlsx'):
xls = pd.ExcelFile(filename)
data = xls.parse('Paris', header = 0 )
client = Connections.getClient('HPP')
# get mapping
mapping = client['FixedIncome']['FieldMapping']
mapping_dictionary = {}
for m in mapping.find():
mapping_dictionary[m['field']] = m
print mapping_dictionary
collection = client['FixedIncome']['Referential']
collection.remove()
for i in range(len(data.index)):
row = {}
for k in data.ix[i].keys():
if k in mapping_dictionary.keys():
row[k] = mapping_dictionary[k][convertStr(data.ix[i][k], '%d/%m/%Y')]
else:
row[k] = convertStr(data.ix[i][k], '%d/%m/%Y')
#row = dict((k, convertStr(data.ix[i][k], '%d/%m/%Y')) for k in data.ix[i].keys() if k not in mapping_dictionary.keys() else (k, convertStr(data.ix[i][k], '%d/%m/%Y')) )
collection.insert(row)
print "--------------------------------------------"
for doc in collection.find():
print doc
print " "
示例4: extract_unique
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def extract_unique(field):
client = Connections.getClient('HPP')
collection = client['FixedIncome']['Referential']
result = collection.aggregate([{'$project': {field: 1, '_id': 0}}])
a = result['result']
l = np.unique(np.array([str(x[field]) for x in result['result']]))
print l
示例5: algos_on_a_day
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def algos_on_a_day(bloom_code, date):
db = Connections.getClient("Mars")["Mars"]["AlgoOrders"]
mapping = Connections.getClient("Mars")["Mars"]["map_tagFIX"]
strategy_name = lambda x : mapping.aggregate([{"$match":{"tag_name": "StrategyName", "tag_value": x}}])["result"][0]["strategy_name"]
security_id = int(convert_symbol(source = "bloomberg", dest = "security_id", value = bloom_code))
print security_id
date = datetime.strptime(date, '%d/%m/%Y')
date_end = date + timedelta(days = 1)
print date
result = db.aggregate([{'$match': {"cheuvreux_secid": security_id, 'SendingTime': {'$gte': date, "$lte": date_end}, 'occ_nb_replace': 0}}])
print len(result["result"])
for r in result["result"]:
#print r["StrategyName"]
print "Algo:", r["strategy_name_mapped"], "Id", r["p_cl_ord_id"], "Size:", r["OrderQty"], "StartTime:" , r["TransactTime"], "Trader:", r["TraderName"] if r.has_key("TraderName") else "-"
return result["result"]
示例6: upload_mapping_table
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def upload_mapping_table(filename, field):
csv = pd.read_csv(filename, ';')
client = Connections.getClient('HPP')
collection = client['FixedIncome']['FieldMapping']
collection.remove()
d = {'field': field}
for i in range(len(csv.index)):
print csv['Value'][i], csv['Mapped Value'][i]
#print np.isnan(str(csv['Mapped Value'][i])
d[str(csv['Value'][i])] = str(csv['Mapped Value'][i]) if str(csv['Mapped Value'][i]) != 'nan' else ''
collection.insert(d)
示例7: upload
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def upload(security_id, date):
    """Replace the MarketData.Tick collection with the tick data of one
    security on one date (fetched via ft) and index it by security_id.
    """
    ticks = ft(security_id=security_id, date=date)
    client = Connections.getClient('PARFLTLAB02')
    collection = client['MarketData']['Tick']
    collection.remove()  # full reload
    for i in range(len(ticks.index)):
        # One document per tick row, timestamped from the DataFrame index.
        document = {'date': ticks.index[i].to_datetime()}
        document.update(ticks.ix[i])
        document['security_id'] = security_id
        collection.insert(document)
    collection.ensure_index("security_id")
示例8: get_one_order
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def get_one_order(bloom_code, date):
#db = Connections.getClient("Mars")["Mars"]["AlgoOrders"]
r = algos_on_a_day(bloom_code, date)
child = Connections.getClient("Mars")["Mars"]["OrderDeals"]
security_id = int(convert_symbol(source = "bloomberg", dest = "security_id", value = bloom_code))
order_list = [x["p_cl_ord_id"] for x in r]
result = child.aggregate([{"$match": {"p_cl_ord_id": {"$in": order_list}}}, {"$project": {"LastPx":1, "LastShares": 1, "_id": 0, "TransactTime": 1}}])
for order in result["result"]:
print order["TransactTime"], order["LastShares"], order["LastPx"]
intraday = ft(security_id = security_id, date = date)
print intraday
示例9: fieldList
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def fieldList(cname=None, db_name="Mars", **kwargs):
    """Return the column list registered for collection `cname` in the
    `field_map` collection of `db_name`, as a numpy array.

    Mirrors get_field_list() but keeps this function's historical return
    type (numpy array, no '_id' appended).
    """
    map_name = "field_map"
    client = Connections.getClient(db_name.upper())
    req_ = client[db_name][map_name].find({"collection_name": cname},
                                          {"list_columns": 1, "_id": 0})
    out = [v for v in req_]
    # BUG FIX: release the client once the cursor is consumed -- the
    # sibling get_field_list() closes it, this function leaked it.
    client.close()
    return np.array(out[0]['list_columns'])
示例10: download
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def download(security_id, date):
client = Connections.getClient('PARFLTLAB02')
collection = client['MarketData']['Tick']
#d = datetime.strptime(date, '%d/%m/%Y')
start = datetime.now()
#result = collection.aggregate([{'$match': {'security_id': 110}}])
result = collection.find({'security_id': 110})
print datetime.now() - start
frame = {}
for r in result:
for k, v in r.iteritems():
if k not in frame.keys(): frame[k] = []
frame[k].append(v)
df = pd.DataFrame(frame, index = frame['date'])
return df
示例11: __init__
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def __init__(self):
    """Load the test list from test_types.json and fetch a fixed window of
    AlgoOrders documents from the TEST server's DB_test1 database.
    """
    import simplejson
    # Use a context manager so the handle is closed even on error (the
    # original open()/read()/close() leaked on exception), and avoid
    # shadowing the builtins `file` and `input`.
    with open('test_types.json', 'r') as fh:
        raw = fh.read()
    # List of tests that the robot should do.
    self.test_list = simplejson.loads(raw)
    # NOTE: the dead commented-out DatabasePlug block and its unused config
    # locals (database_server, server_flex, environment, source, dates...)
    # were removed; the live path queries Mongo directly below.
    client = Connections.getClient('TEST')
    orders = client['DB_test1']['AlgoOrders']
    d1 = datetime.datetime(2013, 7, 2)
    d2 = datetime.datetime(2013, 7, 10)
    # All orders sent strictly between d1 and d2.
    cursors = orders.find(
        {"$and": [
            {"SendingTime": {"$gt": d1}},
            {"SendingTime": {"$lt": d2}},
        ]},
    )
    self.data = list(cursors)
示例12: runQuery
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def runQuery(query, server, database, username=None, output=None, password=None, port=27017):
"""
username/password - is not done yet.
tokens - query string as parsed by simpleSQL
mapred_fns - list of fns ('map','reduce','query' etc supported
in the mongodb shell command)
database - name of database to connect to
"""
tokens = parseSQL(query)
print "\nTOKENS-> %s\n" % tokens
selectors,queries = constructMongoQuery(tokens)
#
# for each table we detect in the 'FROM' clause, we'll construct a
# query and store the results. For multiple-table matches,
#
# Due to persistence, i've decided to ignore the notion of 'collection'
# and use 'table' instead. This may/may not be removed in the future
#
# Owing to the fact that field selection in MongoDB is case sensitive,
# the case has to be preserved. See simpleSQL.py
#
records = {}
for tablename in tokens.tables:
conn = Connections.getClient(server)
try:
db = conn['%s'% database]
table = db['%s'% tablename]
#
# sieve out the queries of the <table.col> <cmp> <table.col>
#
print queries
print "\nQUERY SPEC -> %s" % getQuery(queries, tablename, tokens.tables)
print "\n SELECTORS -> %s" % selectors
c = table.find(spec=getQuery(queries, tablename, tokens.tables), fields=selectors[tablename])
records.setdefault(tablename, [rec for rec in c])
#print "\nRECORDS %s" % records
except Exception, e:
print "\nrunQuery: Error caught while in MongoDB, msg %s" % e
conn.disconnect()
示例13: get_field_list
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def get_field_list(cname=None, db_server = 'MARS' , db_name="Mars"):
    """Return the column list registered for collection `cname` in the
    `field_map` collection of `db_name`, with '_id' appended, as a list.
    """
    #-----------------------------------
    # CONNECTION DB
    #-----------------------------------
    map_name = "field_map"
    client = Connections.getClient(db_server)
    #-----------------------------------
    # CONSTRUCT REQUEST
    #-----------------------------------
    req_ = client[db_name][map_name].find({"collection_name": cname},
                                          {"list_columns": 1, "_id": 0})
    #-----------------------------------
    # CONSTRUCT DATAFRAME
    #-----------------------------------
    # BUG FIX: consume the cursor BEFORE closing the client -- the original
    # closed first and relied on the cursor still being readable afterwards.
    out = []
    for v in req_:
        out.append(v)
    client.close()
    return np.array(out[0]['list_columns'] + ['_id']).tolist()
示例14: get_sequence_data_from_cl_ord_id
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def get_sequence_data_from_cl_ord_id(cl_ord_id=None, colnames=None, db_name=None, algo_collection_name=None):
    """Fetch the given columns for a list of order ids and return them as a
    DataFrame indexed (and sorted) by SendingTime.

    Parameters:
        cl_ord_id            -- list of p_cl_ord_id values to match
        colnames             -- columns to project (SendingTime and
                                p_cl_ord_id are always added)
        db_name              -- database name; upper-cased to pick the client
        algo_collection_name -- collection to query

    Raises ValueError on any missing argument; returns an empty DataFrame
    when nothing matches.
    """
    #-----------------------------------
    # TEST INPUTS
    #-----------------------------------
    # BUG FIX: validate BEFORE touching db_name -- the original called
    # Connections.getClient(db_name.upper()) first, so db_name=None raised
    # AttributeError instead of the intended ValueError.
    if cl_ord_id is None or colnames is None or db_name is None or algo_collection_name is None:
        logging.error('Bad inputs')
        raise ValueError('Bad inputs')
    #-----------------------------------
    # CONNECTION DB + REQUEST
    #-----------------------------------
    client = Connections.getClient(db_name.upper())
    colnames = list(set(colnames + ['SendingTime', 'p_cl_ord_id']))
    res = client[db_name][algo_collection_name].find({"p_cl_ord_id": {"$in": cl_ord_id}},
                                                     dict((k, 1) for k in colnames))
    #-----------------------------------
    # CONSTRUCT DATAFRAME
    #-----------------------------------
    # Consume the cursor before closing the client.
    documents = []
    columns = []
    for v in res:
        documents.append(v)
        columns.extend(v.keys())
    client.close()
    columns = list(set(columns))
    if not documents:
        return pd.DataFrame()
    out = pd.DataFrame.from_records(documents, columns=columns, index='SendingTime')
    out = out.sort_index()
    return out
示例15: deal
# 需要导入模块: from lib.dbtools.connections import Connections [as 别名]
# 或者: from lib.dbtools.connections.Connections import getClient [as 别名]
def deal(db_name="Mars", sequence_id=None, start_date=None, end_date=None, filter = None, merge_order_colnames=None):
    """Extract OrderDeals documents as a DataFrame indexed by TransactTime.

    Selection is either by `sequence_id` (one p_cl_ord_id or a list) or by
    the ['dd/mm/yyyy'] range [start_date, end_date]; `filter` is an optional
    extra Mongo query AND-ed in. Columns are reduced to a fixed deal schema,
    LastPx/LastShares/LastMkt renamed to price/volume/MIC, Side recoded to
    +1 (codes 1/3) / -1 (codes 2/4), exchange_id derived from MIC, and the
    `merge_order_colnames` columns pulled in from the parent-order data.
    """
    #### DEFAULT OUTPUT
    data = pd.DataFrame()
    #### CONNECTIONS and DB
    client = Connections.getClient(db_name.upper())
    deal_db = client[db_name]["OrderDeals"]
    #################################################
    #### Input parsing
    #################################################
    if sequence_id is not None:
        ids = sequence_id
        if isinstance(ids, basestring):
            ids = [ids]
        req = {"p_cl_ord_id": {"$in": ids}}
    elif (start_date is not None) and (end_date is not None):
        sday = dt.datetime.strptime(start_date + '-00:00:01', '%d/%m/%Y-%H:%M:%S')
        eday = dt.datetime.strptime(end_date + '-23:59:59', '%d/%m/%Y-%H:%M:%S')
        req = {"TransactTime": {"$gte": sday, "$lt": eday}}
    else:
        # BUG FIX: the original fell through with `req` undefined and raised
        # NameError; fail with an explicit message instead.
        client.close()
        raise ValueError('deal: need sequence_id or both start_date and end_date')
    # Filters
    if filter is not None:
        req = {'$and': [req, filter]}
    res = deal_db.find(req).sort([("TransactTime", ASCENDING), ("p_exec_id", ASCENDING)])
    ################################################
    #### Request/Extract
    ################################################
    # BUG FIX: consume the cursor BEFORE closing the client (the original
    # closed first, then iterated).
    documents = []
    columns = []
    for v in res:
        documents.append(v)
        columns.extend(v.keys())
    client.close()
    columns = list(set(columns))
    if not documents:
        return data
    data = pd.DataFrame.from_records(documents, columns=columns, index='TransactTime')
    ################################################
    #### HANDLING COLNAMES
    ################################################
    # TODO: add the rate_to_euro etc. info once integrated
    needed_colnames = [  # - id/order infos
        "p_exec_id", "p_cl_ord_id",
        # - deal infos
        "Side", "Symbol", "LastPx", "LastShares", "LastMkt", "ExecType", "Currency",
        "rate_to_euro", "cheuvreux_secid", "strategy_name_mapped"]
    # - drop columns outside the schema
    for x in data.columns.tolist():
        if x not in needed_colnames:
            data = data.drop([x], axis=1)
    # - add missing schema columns as NaN
    for x in needed_colnames:
        if x not in data.columns.tolist():
            data[x] = np.NaN
    # - rename to the public deal column names
    data = data.rename(columns={'LastPx': 'price', 'LastShares': 'volume', 'LastMkt': 'MIC'})
    #### Side: recode FIX side codes to +1 (1/3 = buy side) / -1 (2/4 = sell side)
    if ('Side' in data.columns.tolist()):
        tmp = np.array([np.NaN] * data.shape[0])
        tmp[np.nonzero([int(x) in [1, 3] for x in data['Side']])[0]] = 1
        tmp[np.nonzero([int(x) in [2, 4] for x in data['Side'].values])[0]] = -1
        if np.any(np.isnan(tmp)):
            raise NameError('get_algodata:deal - Side : strange values')
        data['Side'] = tmp
    #### exchange_id derived from the MIC when absent
    if not ('exchange_id' in data.columns.tolist()):
        data['exchange_id'] = get_repository.mic2exchangeid(mic=data['MIC'].values)
    ################################################
    #### MERGE PARENT-ORDER COLUMNS
    ################################################
    if (merge_order_colnames is not None):
        data_seq = sequence_info(sequence_id=matlabutils.uniqueext(data['p_cl_ord_id'].values).tolist())
        if not all([x in data_seq.columns.tolist() for x in merge_order_colnames]):
            raise NameError('get_algodata:deal - bad merge_order_colnames')
        # initialize columns
        for x in merge_order_colnames:
            data[x] = None
        # copy each parent order's values onto its deals
        if data_seq.shape[0] > 0:
            for idx in range(0, data_seq.shape[0]):
                idx_in = np.nonzero(data['p_cl_ord_id'].values == data_seq.ix[idx]['p_cl_ord_id'])[0]
                for x in merge_order_colnames:
                    data[x][idx_in] = data_seq.ix[idx][x]
    return data