本文整理汇总了Python中pymongo.errors.DocumentTooLarge方法的典型用法代码示例。如果您正苦于以下问题:Python errors.DocumentTooLarge方法的具体用法?Python errors.DocumentTooLarge怎么用?Python errors.DocumentTooLarge使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pymongo.errors
的用法示例。
在下文中一共展示了errors.DocumentTooLarge方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: new_column
# 需要导入模块: from pymongo import errors [as 别名]
# 或者: from pymongo.errors import DocumentTooLarge [as 别名]
def new_column(f, c, t, sig, n_data, t_data):
    """Store one column's profile as a document in the model DB.

    f -> file name
    c -> column name
    t -> column type
    sig -> column signature
    n_data -> numerical data
    t_data -> textual data
    """
    record = dict(
        key=build_column_key(f, c),
        filename=f,
        column=c,
        type=t,
        signature=sig,
        t_data=t_data,
        n_data=n_data,
    )
    try:
        modeldb.insert_one(record)
    except DocumentTooLarge:
        # Document exceeds the server's BSON size cap; report and skip it.
        print("Trying to load: " + str(f) + " - " + str(c))
示例2: write_to_mongo
# 需要导入模块: from pymongo import errors [as 别名]
# 或者: from pymongo.errors import DocumentTooLarge [as 别名]
def write_to_mongo(prediction_json,
                   mongo_client,
                   tweet_input_mongo_database_name):
    """Upsert a W6 popularity-prediction report into MongoDB.

    If the report exceeds MongoDB's document size limit, progressively
    trim trailing entries from ``graph_snapshots`` and retry; as a last
    resort, keep only the first snapshot.

    prediction_json -> raw prediction output; must contain "tweet_id"
    mongo_client -> an open pymongo MongoClient
    tweet_input_mongo_database_name -> target database name
    """
    json_report = make_w6_json_report(prediction_json)
    mongo_database = mongo_client[tweet_input_mongo_database_name]
    mongo_collection = mongo_database["popularity_prediction_output"]
    tweet_id = int(prediction_json["tweet_id"])
    json_report["_id"] = tweet_id
    json_report["tweet_id_string"] = repr(tweet_id)
    smaller_json = copy.copy(json_report)
    # BUGFIX: counter must persist across retries; it previously reset to 0
    # on every loop iteration, so the document never actually shrank.
    counter = 0
    while True:
        try:
            mongo_collection.replace_one({"_id": tweet_id}, smaller_json, upsert=True)
            break
        except pymongo_errors.DocumentTooLarge:
            print("It was too large.")
            if counter >= (len(json_report["graph_snapshots"]) - 1):
                # Down to a single snapshot: make one final attempt, then
                # stop either way (success or another DocumentTooLarge).
                smaller_json = copy.copy(json_report)
                smaller_json["graph_snapshots"] = [smaller_json["graph_snapshots"][0]]
                try:
                    mongo_collection.replace_one({"_id": tweet_id}, smaller_json, upsert=True)
                except pymongo_errors.DocumentTooLarge:
                    pass
                break
            smaller_json = copy.copy(json_report)
            # BUGFIX: assign the trimmed slice directly; the original wrapped
            # the slice in an extra list, nesting the snapshots one level deep.
            smaller_json["graph_snapshots"] = smaller_json["graph_snapshots"][0:-(counter + 1)]
            counter += 1
示例3: _raise_document_too_large
# 需要导入模块: from pymongo import errors [as 别名]
# 或者: from pymongo.errors import DocumentTooLarge [as 别名]
def _raise_document_too_large(operation, doc_size, max_size):
    """Internal helper for raising DocumentTooLarge."""
    if operation == "insert":
        message = ("BSON document too large (%d bytes)"
                   " - the connected server supports"
                   " BSON document sizes up to %d"
                   " bytes." % (doc_size, max_size))
    else:
        # There's nothing intelligent we can say
        # about size for update and remove
        message = "command document too large"
    raise DocumentTooLarge(message)
示例4: run
# 需要导入模块: from pymongo import errors [as 别名]
# 或者: from pymongo.errors import DocumentTooLarge [as 别名]
def run(self):
    """Train and persist an AnalyticBaseline for this task's analytic.

    Queries the analytic over the task's time range, collects the observed
    state fields, clusters them into a baseline, and saves it — repeatedly
    re-clustering with stricter limits whenever the saved document exceeds
    MongoDB's size cap (DocumentTooLarge).
    """
    self.update_start()
    query_context = self.get_query_context()
    logger.debug('Training on {} with ctx {}'.format(self.analytic, query_context))
    # Reuse the existing baseline for this analytic if one is stored.
    baseline = AnalyticBaseline.objects(analytic=self.analytic).first()
    if baseline is None:
        baseline = AnalyticBaseline(analytic=self.analytic)
    baseline.time_range = self.range
    results = []
    found_keys = set()
    for i, output in enumerate(query_context.query(self.analytic)):
        fields = output['state']
        found_keys.update(fields.keys())
        results.append(fields)
        # Only the first 512 raw results are stored on this task document;
        # the running count is incremented for every result regardless.
        if i < 512:
            self.update(add_to_set__results=output, inc__count=1)
        else:
            self.update(inc__count=1)
    baseline.keys = [ClusterKey(name=k, status=True) for k in found_keys]
    baseline.cluster_events(results, min_size=1)
    baseline.original_root = baseline.root
    min_size = 1
    max_children = 1024
    # Continue to build a baseline until it works
    while max_children > 0:
        try:
            baseline.save()
            return
        except DocumentTooLarge:
            # try to dynamically adjust this until it fits
            # (raise min cluster size, shrink max children by 10% each pass)
            baseline.cluster_events(results, min_size=min_size, max_children=max_children)
            baseline.original_root = baseline.root
            baseline.save()
            min_size += 1
            max_children = int(max_children * 0.9)
    # probably redundant, but useful to re-raise errors if the baseline isn't successful yet
    baseline.save()