This article collects typical usage examples of the Python method bson.SON.update. If you have been wondering what SON.update does, how to call it, or where it is used in practice, the curated examples below should help. You can also explore the containing class, bson.SON, for further details.
The following presents 15 code examples of SON.update, sorted by popularity by default. Please upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
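As background: bson.SON is an ordered dictionary (a dict subclass that remembers insertion order), and its update method merges keys in just like dict.update, appending keys that are not already present. A minimal sketch of the behavior all the examples below rely on (the collection and field names here are invented):

from bson import SON

cmd = SON([('findAndModify', 'users')])     # the command verb stays first
cmd.update({'query': {'name': 'ada'}})      # merged like dict.update; new keys are appended
cmd['update'] = {'$set': {'active': True}}
print(list(cmd.keys()))                     # ['findAndModify', 'query', 'update']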
Example 1: specific_config_gen
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def specific_config_gen(IC, args):
    IC.base_dir = args['base_dir']
    IC.annotate_dir = args['annotate_dir']
    IC.groundtruth_dir = args['groundtruth_dir']
    IC.correspondence = tb.tabarray(SVfile=args['frame_correspondence'])
    IC.size = args['size']
    IC.prefix = prefix = args.get('image_extension', '.jpg')
    IC.current_frame_path = None
    csvs = [x for x in os.listdir(IC.annotate_dir) if x.endswith('.csv')]
    csvs.sort()
    Xs = [tb.tabarray(SVfile=os.path.join(IC.annotate_dir, csv)) for csv in csvs]
    cns = [csv.split('.')[0] for csv in csvs]
    cns = [[cn] * len(X) for (cn, X) in zip(cns, Xs)]
    Xs = [X.addcols(cn, names=['clip_num']) for (cn, X) in zip(cns, Xs)]
    csvs = [x for x in os.listdir(IC.groundtruth_dir) if x.endswith('.csv')]
    csvs.sort()
    Gs = []
    fields = ['clip_num', 'Frame'] + xfields + yfields
    for ind, csv in enumerate(csvs):
        try:
            g = tb.tabarray(SVfile=os.path.join(IC.groundtruth_dir, csv))
        except:
            # no ground-truth file for this clip: mark all annotations as unscored
            x = Xs[ind].addcols([-1] * len(Xs[ind]), names=['Correctness'])
        else:
            g = g.addcols([csv.split('.')[0]] * len(g), names=['clip_num'])
            g = g[fields + ['Confidence']]
            g.renamecol('Confidence', 'Correctness')
            x = Xs[ind].join(g, keycols=fields)
        Gs.append(x)
    X = tb.tab_rowstack(Gs)
    X.sort(order=['clip_num', 'Frame'])
    Y = IC.correspondence
    F = tb.fast.recarrayisin(Y[['clip_num', 'Frame']], X[['clip_num', 'Frame']])
    Y = Y[F]
    X = X.join(Y, keycols=['clip_num', 'Frame'])
    params = []
    for t in X:
        print(t)
        cn = t['clip_num']
        fr = t['Frame']
        box = get_darpa_box(t)
        bb = box.pop('box')
        xc, yc = bb.center
        center = correct_center((xc, yc), IC.size, (1920, 1080))
        bb_new = bbox.BoundingBox(center=center, width=IC.size[0], height=IC.size[1])
        p = SON([('size', IC.size),
                 ('bounding_box', SON([('xfields', list(bb_new.xs)), ('yfields', list(bb_new.ys))])),
                 ('original_bounding_box', SON([('xfields', list(bb.xs)), ('yfields', list(bb.ys))])),
                 ('clip_num', cn),
                 ('Frame', int(t['Original'])),
                 ('base_dir', IC.base_dir),
                 ('correctness', int(t['Correctness']))])
        p.update(box)
        p['GuessObjectType'] = p['ObjectType']
        p['ObjectType'] = p['ObjectType'] if t['Correctness'] == 1 else ''
        params.append(SON([('image', p)]))
    return params
Example 2: command
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def command(self, command, value=1, read_preference=None,
            callback=None, check=True, allowable_errors=[], connection=None, **kwargs):
    """Issue a MongoDB command.

    Send command `command` to the database and return the
    response. If `command` is an instance of :class:`basestring`
    then the command ``{command: value}`` will be sent. Otherwise,
    `command` must be an instance of :class:`dict` and will be
    sent as-is.

    Any additional keyword arguments will be added to the final
    command document before it is sent.

    For example, a command like ``{buildinfo: 1}`` can be sent using:

    >>> db.command("buildinfo")

    For a command where the value matters, like ``{collstats: collection_name}``
    we can do:

    >>> db.command("collstats", collection_name)

    For commands that take additional arguments we can use
    kwargs. So ``{filemd5: object_id, root: file_root}`` becomes:

    >>> db.command("filemd5", object_id, root=file_root)

    :Parameters:
      - `command`: document representing the command to be issued,
        or the name of the command (for simple commands only).

        .. note:: the order of keys in the `command` document is
           significant (the "verb" must come first), so commands
           which require multiple keys (e.g. `findandmodify`)
           should use an instance of :class:`~bson.son.SON` or
           a string and kwargs instead of a Python `dict`.

      - `value` (optional): value to use for the command verb when
        `command` is passed as a string
      - `**kwargs` (optional): additional keyword arguments will
        be added to the command document before it is sent
    """
    if isinstance(command, basestring):
        command = SON([(command, value)])
    command.update(kwargs)
    if read_preference is None:
        read_preference = self._read_preference
    Client(self, '$cmd').find_one(command, is_command=True,
                                  connection=connection, read_preference=read_preference,
                                  callback=callback)
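As the docstring's note says, multi-key commands need the verb to stay first, which is exactly what SON plus update guarantees. A hypothetical sketch (collection and field names invented):

from bson import SON

cmd = SON([('findandmodify', 'jobs')])            # verb first
cmd.update({'query': {'state': 'queued'},
            'update': {'$set': {'state': 'running'}}})
# db.command(cmd, callback=on_done)               # as it would be issued via the method above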
Example 3: near
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def near(col, params):
    if params is None or params.count(':') != 2:
        raise TypeError("$near requires three arguments. Use like /%s/$near=-73.10:42.18:0.5/ "
                        "to return all records within a 0.5-mile radius of %s" % (col, col))
    params = params.split(":")
    params[0] = float(params[0])
    params[1] = float(params[1])
    params[2] = float(params[2]) / 69.0  # convert miles to degrees (~69 miles per degree)
    near_dict = {"$near": [params[0], params[1]]}
    dist_dict = {"$maxDistance": params[2]}
    q = SON(near_dict)
    q.update(dist_dict)
    return q
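A quick usage sketch of the helper above (the field name location is invented):

q = near('location', '-73.10:42.18:0.5')
# q is SON([('$near', [-73.1, 42.18]), ('$maxDistance', 0.5 / 69.0)])
# a full filter would then be {'location': q}, e.g. collection.find({'location': q})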
Example 4: generate_splits
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def generate_splits(task_config, hash, colname):
    base_query = SON([('__hash__', hash)])
    ntrain = task_config['ntrain']
    ntest = task_config['ntest']
    ntrain_pos = task_config.get('ntrain_pos')
    ntest_pos = task_config.get('ntest_pos')
    N = task_config.get('N', 10)
    query = task_config['query']
    base_query.update(reach_in('config', task_config.get('universe', SON([]))))
    cquery = reach_in('config', query)
    print('q', cquery)
    print('u', base_query)
    return traintest.generate_split2(DB_NAME, colname, cquery, N, ntrain, ntest,
                                     ntrain_pos=ntrain_pos, ntest_pos=ntest_pos,
                                     universe=base_query, use_negate=True)
Example 5: train_test_loop
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def train_test_loop(outfile, extract_creates, task_config, feature_config_path, hash):
    feature_config = get_config(feature_config_path)
    base_query = SON([('__config_hash__', hash)])
    image_params = SON([('image', feature_config['image'])])
    models_params = feature_config['models']
    ntrain = task_config['ntrain']
    ntest = task_config['ntest']
    ntrain_pos = task_config.get('ntrain_pos')
    N = task_config.get('N', 10)
    query = task_config['query']
    base_query.update(reach_in('config', task_config.get('universe', SON([]))))
    print('\n')
    print('BASE', base_query)
    print('\n')
    conn = pm.Connection(document_class=SON)
    db = conn['v1']
    fs = gridfs.GridFS(db, collection='model_performance')
    cquery = reach_in('config', query)
    for m in models_params:
        base_query_copy = base_query.copy()
        base_query_copy.update(reach_in('config.model', m))
        splitdata, results = train_test(cquery, 'v1', 'features', ntrain, ntest,
                                        ntrain_pos=ntrain_pos, N=N, universe=base_query_copy)
        splitpickle = cPickle.dumps(splitdata)
        data = SON([('feature_config_path', feature_config_path),
                    ('model', m),
                    ('task', son_escape(task_config)),
                    ('image__aggregate__', son_escape(feature_config['image']))])
        filename = get_filename(data)
        data.update(results)
        data['filename'] = filename
        fs.put(splitpickle, **data)
    createCertificateDict(outfile, {'task_config': task_config,
                                    'feature_config': feature_config,
                                    'feature_config_path': feature_config_path})
Example 6: put_in_split_result
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def put_in_split_result(res, image_config_gen, m, task, ext_hash, split_id, splitres_fs):
    out_record = SON([('model', m['config']['model']),
                      ('images', son_escape(image_config_gen['images'])),
                      ('task', son_escape(task)),
                      ('split_id', split_id),
                      ])
    split_result = SON([])
    for stat in STATS:
        if stat in res and res[stat] is not None:
            split_result[stat] = res[stat]
    filename = get_filename(out_record)
    out_record['filename'] = filename
    out_record['__hash__'] = ext_hash
    out_record.update(split_result)
    print('dump out split result...')
    out_data = cPickle.dumps(SON([('split_result', res)]))
    splitres_fs.put(out_data, **out_record)
Example 7: put_in_performance
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def put_in_performance(split_results, image_config_gen, m, model_hash, image_hash,
                       perf_coll, task, ext_hash):
    model_results = SON([])
    for stat in STATS:
        if stat in split_results[0] and split_results[0][stat] is not None:
            model_results[stat] = sp.array([split_result[stat]
                                            for split_result in split_results]).mean()
    out_record = SON([('model', m['config']['model']),
                      ('model_hash', model_hash),
                      ('model_filename', m['filename']),
                      ('images', son_escape(image_config_gen['images'])),
                      ('image_hash', image_hash),
                      ('task', son_escape(task)),
                      ('__hash__', ext_hash)
                      ])
    out_record.update(model_results)
    perf_coll.insert(out_record)
Example 8: get_last_error
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def get_last_error(self, db, **options):
    command = SON([("getlasterror", 1)])
    db = "%s.$cmd" % db.split('.', 1)[0]
    command.update(options)
    query = Query(collection=db, query=command)
    reply = yield self.send_QUERY(query)
    assert len(reply.documents) == 1
    document = reply.documents[0].decode()
    err = document.get("err", None)
    code = document.get("code", None)
    if err is not None:
        if code == 11000:
            raise DuplicateKeyError(err, code=code)
        else:
            raise OperationFailure(err, code=code)
    defer.returnValue(document)
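Here update simply appends caller-supplied write-concern options after the getlasterror verb; for example, with the standard w and wtimeout options (values illustrative) the command document would come out as:

command = SON([("getlasterror", 1)])
command.update({"w": 2, "wtimeout": 1000})
# SON([('getlasterror', 1), ('w', 2), ('wtimeout', 1000)])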
Example 9: get_op_gen
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def get_op_gen(op, oplist):
    if op.get("outcertpaths") is None:
        func = op["func"]
        params = op.get("params")
        inroots = func.inroots
        outroots = func.outroots
        if func.action_name == "inject":
            params = op["params"]  # inject ops must carry explicit params
            out_args = SON([(outroot, params) for outroot in outroots])
        else:
            params = op.get("params", SON([]))
            parents = []
            for ir in inroots:
                try:
                    parent = [op0 for op0 in oplist if ir in op0["func"].outroots][0]
                except IndexError:
                    raise IndexError("No parent found for input collection %r" % ir)
                else:
                    parents.append(parent)
            for parent in parents:
                get_op_gen(parent, oplist)
            in_args = [parent["out_args"] for parent in parents]
            op["incertpaths"] = [
                get_cert_path(func.dbname, inroot, get_config_string(in_arg))
                for (inroot, in_arg) in zip(inroots, in_args)
            ]
            out_args = dict_union(in_args)
            out_args.update(params)
        op["out_args"] = out_args
        op["outcertpaths"] = [
            get_cert_path(func.dbname, outroot, get_config_string(out_args))
            for outroot in func.outroots
        ]
Example 10: oid_date_range_filter
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def oid_date_range_filter(dt_from=None, dt_upto=None, field_name='_id'):
    """
    Constructs a range query useful for querying an ObjectId field by date.

    :Parameters:
      - dt_from (datetime or tuple): start datetime; if a tuple, a datetime is constructed from it
      - dt_upto (datetime or tuple): end datetime; if a tuple, a datetime is constructed from it
      - field_name (str): optional, defaults to '_id'; the field to query.
        If None, returns the range document only; otherwise returns the full query.

    :Returns:
      - range query (due to the ObjectId structure, $gte includes dt_from, while the
        zero-filled $lte bound effectively excludes ObjectIds generated at or after dt_upto)
    """
    def dt(dt_or_tuple):
        if isinstance(dt_or_tuple, datetime):
            return dt_or_tuple
        elif isinstance(dt_or_tuple, tuple):
            return datetime(*dt_or_tuple)
        else:
            raise TypeError('dt must be a datetime or tuple')
    q = SON()
    if dt_from is not None:
        q.update(SON([('$gte', ObjectId.from_datetime(dt(dt_from)))]))
    if dt_upto is not None:
        q.update(SON([('$lte', ObjectId.from_datetime(dt(dt_upto)))]))
    return q if field_name is None else SON([(field_name, q)])
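A quick usage sketch (dates are illustrative; collection is a hypothetical pymongo collection):

from datetime import datetime

q = oid_date_range_filter(datetime(2020, 1, 1), (2020, 2, 1))  # a datetime or a tuple both work
# q == {'_id': {'$gte': ObjectId(<2020-01-01>), '$lte': ObjectId(<2020-02-01>)}}
# collection.find(q) then matches documents whose _id was generated in January 2020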
Example 11: greedy_optimization
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def greedy_optimization(outfile, task, image_certificate_file, initial_model, convolve_func,
                        rep_limit, modifier_args, modifier):
    conn = pm.Connection(document_class=bson.SON)
    db = conn['v1']
    opt_fs = gridfs.GridFS(db, 'optimized_performance')
    image_coll = db['raw_images.files']
    image_fs = gridfs.GridFS(db, 'raw_images')
    image_certdict = cPickle.load(open(image_certificate_file))
    print('using image certificate', image_certificate_file)
    image_hash = image_certdict['run_hash']
    image_args = image_certdict['out_args']
    if convolve_func == v1f.v1like_filter_pyfft:
        v1_pyfft.setup_pyfft()
    filterbanks = []
    perfs = []
    model_configs = []
    center_config = initial_model
    i = 0
    improving = True
    while rep_limit is None or i < rep_limit:
        i += 1
        print('Round', i)
        next_configs = [m for m in get_consistent_deltas(center_config, modifier)
                        if m not in model_configs]
        if next_configs:
            next_results = [get_performance(task, image_hash, image_fs, m, convolve_func)
                            for m in next_configs]
            next_perfs = [x[0] for x in next_results]
            next_filterbanks = [x[1] for x in next_results]
            next_perf_ac_max = np.array([x['test_accuracy'] for x in next_perfs]).max()
            perf_ac_max = max([x['test_accuracy'] for x in perfs]) if perfs else 0
            if next_perf_ac_max > perf_ac_max:
                next_perf_ac_argmax = np.array([x['test_accuracy'] for x in next_perfs]).argmax()
                center_config = next_configs[next_perf_ac_argmax]
                print('\n\n')
                print('new best performance is', next_perf_ac_max, 'from model', center_config)
                print('\n\n')
                perfs.extend(next_perfs)
                model_configs.extend(next_configs)
                filterbanks.extend(next_filterbanks)
            else:
                print('Breaking because no further optimization could be done. '
                      'Best existing performance was', perf_ac_max,
                      'while best next performance was', next_perf_ac_max)
                break
        else:
            print('Breaking because no next configs')
            break
    perfargmax = np.array([p['test_accuracy'] for p in perfs]).argmax()
    best_model = model_configs[perfargmax]
    best_performance = perfs[perfargmax]
    out_record = SON([('initial_model', initial_model),
                      ('task', son_escape(task)),
                      ('images', son_escape(image_args)),
                      ('images_hash', image_hash),
                      ('modifier_args', son_escape(modifier_args)),
                      ('modifier', modifier.__class__.__module__ + '.' + modifier.__class__.__name__)
                      ])
    filename = get_filename(out_record)
    out_record['filename'] = filename
    out_record.update(SON([('performances', perfs)]))
    out_record.update(SON([('best_model', best_model)]))
    out_record.update(SON([('best_performance', best_performance)]))
    out_record.update(SON([('num_steps', len(model_configs))]))
    out_record.update(SON([('models', model_configs)]))
    outdata = cPickle.dumps(filterbanks)
    opt_fs.put(outdata, **out_record)
    if convolve_func == v1f.v1like_filter_pyfft:
        v1_pyfft.cleanup_pyfft()
    createCertificateDict(outfile, {'image_file': image_certificate_file})
Example 12: find_cmd
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def find_cmd(self, select=None, project=None, sort=None, take=None, skip=None,
             tailable=False, reduce_by=None):
    cmd = SON([('find', self._mongo_collection.name)])
    if select:
        cmd.update({'filter': select})
    if project:
        cmd.update({'projection': project})
    if sort:
        cmd.update({'sort': sort})
    if skip:
        cmd.update({'skip': skip})
    if take:
        cmd.update({'limit': take})
    if tailable:
        cmd.update({'tailable': tailable})
    cmd.update({'singleBatch': True})
    cmd.update({'batchSize': 1000})
    docs = self.db_command(cmd)['cursor']['firstBatch']
    if hasattr(docs, '__iter__'):
        return [d if not reduce_by else reduce_by(**d) for d in docs]
        # for d in docs:
        #     yield d if not reduce_by else reduce_by().objectify(d)
    else:
        return docs if not reduce_by else reduce_by(**docs)
Example 13: distinct_cmd
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def distinct_cmd(self, key, query=None):
    cmd = SON([('distinct', self._mongo_collection.name)])
    cmd.update({'key': key})
    if query:
        cmd.update({'query': query})
    return self.db_command(cmd)['values']
Example 14: count_cmd
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def count_cmd(self, select=None, take=None, skip=None):
    # the count command takes query/limit/skip and returns {'n': <count>}
    cmd = SON([('count', self._mongo_collection.name)])
    if select:
        cmd.update({'query': select})
    if take:
        cmd.update({'limit': take})
    if skip:
        cmd.update({'skip': skip})
    return self.db_command(cmd)['n']
Example 15: evaluate
# Required import: from bson import SON [as alias]
# Or: from bson.SON import update [as alias]
def evaluate(outfile, feature_certificate, cpath, task, ext_hash):
    conn = pm.Connection(document_class=bson.SON)
    db = conn[DB_NAME]
    perf_fs = gridfs.GridFS(db, 'performance')
    perf_coll = db['performance.files']
    remove_existing(perf_coll, perf_fs, ext_hash)
    feature_certdict = cPickle.load(open(feature_certificate))
    feature_hash = feature_certdict['feature_hash']
    image_hash = feature_certdict['image_hash']
    model_hash = feature_certdict['model_hash']
    image_config_gen = feature_certdict['args']['images']
    model_col = db['models.files']
    feature_fs = gridfs.GridFS(db, 'features')
    feature_col = db['features.files']
    stats = ['test_accuracy', 'ap', 'auc', 'mean_ap', 'mean_auc', 'train_accuracy']
    if isinstance(task, list):
        task_list = task
    else:
        task_list = [task]
    model_configs = get_most_recent_files(model_col, {'__hash__': model_hash})
    for m in model_configs:
        print('Evaluating model', m)
        for task in task_list:
            task['universe'] = task.get('universe', SON([]))
            task['universe']['model'] = m['config']['model']
            print('task', task)
            classifier_kwargs = task.get('classifier_kwargs', {})
            split_results = []
            splits = generate_splits(task, feature_hash, 'features')
            for (ind, split) in enumerate(splits):
                print('split', ind)
                train_data = split['train_data']
                test_data = split['test_data']
                train_filenames = [t['filename'] for t in train_data]
                test_filenames = [t['filename'] for t in test_data]
                assert set(train_filenames).intersection(test_filenames) == set([])
                print('train feature extraction ...')
                train_features = sp.row_stack([load_features(f['filename'], feature_fs, m, task)
                                               for f in train_data])
                print('test feature extraction ...')
                test_features = sp.row_stack([load_features(f['filename'], feature_fs, m, task)
                                              for f in test_data])
                train_labels = split['train_labels']
                test_labels = split['test_labels']
                print('classifier ...')
                res = svm.classify(train_features, train_labels, test_features, test_labels,
                                   classifier_kwargs)
                print('Split test accuracy', res['test_accuracy'])
                split_results.append(res)
            model_results = SON([])
            for stat in STATS:
                if stat in split_results[0] and split_results[0][stat] is not None:
                    model_results[stat] = sp.array([split_result[stat]
                                                    for split_result in split_results]).mean()
            out_record = SON([('model', m['config']['model']),
                              ('model_hash', model_hash),
                              ('model_filename', m['filename']),
                              ('images', son_escape(image_config_gen)),
                              ('image_hash', image_hash),
                              ('task', son_escape(task)),
                              ])
            filename = get_filename(out_record)
            out_record['filename'] = filename
            out_record['config_path'] = cpath
            out_record['__hash__'] = ext_hash
            out_record.update(model_results)
            print('dump out ...')
            out_data = cPickle.dumps(SON([('split_results', split_results), ('splits', splits)]))
            perf_fs.put(out_data, **out_record)
    createCertificateDict(outfile, {'feature_file': feature_certificate})