This article collects typical usage examples of the Python method mo_math.Math.round. If you are wondering what exactly Math.round does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further examples of the containing class, mo_math.Math.
The following presents 8 code examples of Math.round, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
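Before the examples, here is a minimal sketch of the call patterns they rely on. The decimal keyword and the effect of negative values are inferred from the calls in the examples below, not verified against the library documentation:

from mo_math import Math

Math.round(404, decimal=-2)        # negative decimal rounds to the nearest hundred -> 400 (status-code bucketing, Examples 2, 7, 8)
Math.round(0.95 * 100, decimal=6)  # keep six fractional digits -> 95.0 (percentile scaling, Examples 3, 5)
Math.round(2.3, decimal=0)         # round to a whole number -> 2 (wrapped in int() in Example 6)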
Example 1: int2Partition
# Required import: from mo_math import Math [as alias]
# Or: from mo_math.Math import round [as alias]
def int2Partition(value):
    if Math.round(value) == 0:
        return edge.domain.NULL
    d = datetime(str(value)[:4:], str(value)[-2:], 1)
    d = d.addMilli(offset)
    return edge.domain.getPartByKey(d)
Example 2: get_json
# Required import: from mo_math import Math [as alias]
# Or: from mo_math.Math import round [as alias]
def get_json(url, **kwargs):
    """
    ASSUME RESPONSE IS IN JSON
    """
    response = get(url, **kwargs)
    try:
        c = response.all_content
        return json2value(utf82unicode(c))
    except Exception as e:
        if Math.round(response.status_code, decimal=-2) in [400, 500]:
            Log.error(u"Bad GET response: {{code}}", code=response.status_code)
        else:
            Log.error(u"Good GET request, but bad JSON", cause=e)
Example 3: es_aggsop
# Required import: from mo_math import Math [as alias]
# Or: from mo_math.Math import round [as alias]
def es_aggsop(es, frum, query):
    query = query.copy()  # WE WILL MARK UP THIS QUERY
    schema = frum.schema
    select = listwrap(query.select)
    es_query = Data()
    new_select = Data()  # MAP FROM canonical_name (USED FOR NAMES IN QUERY) TO SELECT MAPPING
    formula = []
    for s in select:
        if s.aggregate == "count" and isinstance(s.value, Variable) and s.value.var == ".":
            if schema.query_path == ".":
                s.pull = jx_expression_to_function("doc_count")
            else:
                s.pull = jx_expression_to_function({"coalesce": ["_nested.doc_count", "doc_count", 0]})
        elif isinstance(s.value, Variable):
            if s.aggregate == "count":
                new_select["count_" + literal_field(s.value.var)] += [s]
            else:
                new_select[literal_field(s.value.var)] += [s]
        elif s.aggregate:
            formula.append(s)
    for canonical_name, many in new_select.items():
        for s in many:
            columns = frum.schema.values(s.value.var)
            if s.aggregate == "count":
                canonical_names = []
                for column in columns:
                    cn = literal_field(column.es_column + "_count")
                    if column.jx_type == EXISTS:
                        canonical_names.append(cn + ".doc_count")
                        es_query.aggs[cn].filter.range = {column.es_column: {"gt": 0}}
                    else:
                        canonical_names.append(cn + ".value")
                        es_query.aggs[cn].value_count.field = column.es_column
                if len(canonical_names) == 1:
                    s.pull = jx_expression_to_function(canonical_names[0])
                else:
                    s.pull = jx_expression_to_function({"add": canonical_names})
            elif s.aggregate == "median":
                if len(columns) > 1:
                    Log.error("Do not know how to count columns with more than one type (script probably)")
                # ES USES DIFFERENT METHOD FOR PERCENTILES
                key = literal_field(canonical_name + " percentile")
                es_query.aggs[key].percentiles.field = columns[0].es_column
                es_query.aggs[key].percentiles.percents += [50]
                s.pull = jx_expression_to_function(key + ".values.50\\.0")
            elif s.aggregate == "percentile":
                if len(columns) > 1:
                    Log.error("Do not know how to count columns with more than one type (script probably)")
                # ES USES DIFFERENT METHOD FOR PERCENTILES
                key = literal_field(canonical_name + " percentile")
                if isinstance(s.percentile, text_type) or s.percentile < 0 or 1 < s.percentile:
                    Log.error("Expecting percentile to be a float from 0.0 to 1.0")
                percent = Math.round(s.percentile * 100, decimal=6)
                es_query.aggs[key].percentiles.field = columns[0].es_column
                es_query.aggs[key].percentiles.percents += [percent]
                es_query.aggs[key].percentiles.tdigest.compression = 2
                s.pull = jx_expression_to_function(key + ".values." + literal_field(text_type(percent)))
            elif s.aggregate == "cardinality":
                canonical_names = []
                for column in columns:
                    cn = literal_field(column.es_column + "_cardinality")
                    canonical_names.append(cn)
                    es_query.aggs[cn].cardinality.field = column.es_column
                if len(columns) == 1:
                    s.pull = jx_expression_to_function(canonical_names[0] + ".value")
                else:
                    s.pull = jx_expression_to_function({"add": [cn + ".value" for cn in canonical_names], "default": 0})
            elif s.aggregate == "stats":
                if len(columns) > 1:
                    Log.error("Do not know how to count columns with more than one type (script probably)")
                # REGULAR STATS
                stats_name = literal_field(canonical_name)
                es_query.aggs[stats_name].extended_stats.field = columns[0].es_column
                # GET MEDIAN TOO!
                median_name = literal_field(canonical_name + "_percentile")
                es_query.aggs[median_name].percentiles.field = columns[0].es_column
                es_query.aggs[median_name].percentiles.percents += [50]
                s.pull = get_pull_stats(stats_name, median_name)
            elif s.aggregate == "union":
                pulls = []
                for column in columns:
                    script = {"scripted_metric": {
                        'init_script': 'params._agg.terms = new HashSet()',
                        'map_script': 'for (v in doc[' + quote(column.es_column) + '].values) params._agg.terms.add(v);',
                        'combine_script': 'return params._agg.terms.toArray()',
                        'reduce_script': 'HashSet output = new HashSet(); for (a in params._aggs) { if (a!=null) for (v in a) {output.add(v)} } return output.toArray()',
                    }}
                    stats_name = encode_property(column.es_column)
                    if column.nested_path[0] == ".":
                        es_query.aggs[stats_name] = script
                        pulls.append(jx_expression_to_function(stats_name + ".value"))
                    else:
                        es_query.aggs[stats_name] = {
# ......... part of the code omitted here .........
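In the "percentile" branch above, the caller supplies the percentile as a fraction between 0.0 and 1.0 while the Elasticsearch percentiles aggregation expects a percent, so Math.round(s.percentile * 100, decimal=6) both scales it and trims binary floating-point noise before the value is reused in the pull path. A small sketch of why the rounding matters (illustrative numbers, not from the source):

percent = 0.95 * 100                       # 94.99999999999999 in binary floating point
percent = Math.round(percent, decimal=6)   # -> 95.0, so text_type(percent) yields a clean key such as "95.0"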
Example 4: round
# Required import: from mo_math import Math [as alias]
# Or: from mo_math.Math import round [as alias]
def round(self, interval, decimal=0):
    output = self / interval
    output = Math.round(output, decimal)
    return output
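This method divides by an interval before rounding, so it returns how many intervals fit into the value (rounded to decimal places) rather than a rounded copy of the value itself. A minimal sketch under the assumption that the surrounding class behaves like a duration; the Interval class below is hypothetical and only stands in for whatever class actually defines this method:

class Interval(object):
    # Hypothetical stand-in for the real class (possibly a duration type).
    def __init__(self, seconds):
        self.seconds = seconds

    def __truediv__(self, other):
        return self.seconds / float(other.seconds)
    __div__ = __truediv__  # Python 2 spelling, matching the era of this code

    def round(self, interval, decimal=0):
        output = self / interval
        output = Math.round(output, decimal)
        return output

Interval(90 * 60).round(Interval(60 * 60), decimal=1)   # 90 minutes / 1 hour -> 1.5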
Example 5: es_aggsop
# Required import: from mo_math import Math [as alias]
# Or: from mo_math.Math import round [as alias]
def es_aggsop(es, frum, query):
    select = wrap([s.copy() for s in listwrap(query.select)])
    # [0] is a cheat; each es_column should be a dict of columns keyed on type, like in sqlite
    es_column_map = {v: frum.schema[v][0].es_column for v in query.vars()}
    es_query = Data()
    new_select = Data()  # MAP FROM canonical_name (USED FOR NAMES IN QUERY) TO SELECT MAPPING
    formula = []
    for s in select:
        if s.aggregate == "count" and isinstance(s.value, Variable) and s.value.var == ".":
            s.pull = "doc_count"
        elif isinstance(s.value, Variable):
            if s.value.var == ".":
                if frum.typed:
                    # STATISTICAL AGGS IMPLY $value, WHILE OTHERS CAN BE ANYTHING
                    if s.aggregate in NON_STATISTICAL_AGGS:
                        # TODO: HANDLE BOTH $value AND $objects TO COUNT
                        Log.error("do not know how to handle")
                    else:
                        s.value.var = "$value"
                        new_select["$value"] += [s]
                else:
                    if s.aggregate in NON_STATISTICAL_AGGS:
                        # TODO: WE SHOULD BE ABLE TO COUNT, BUT WE MUST *OR* ALL LEAF VALUES TO DO IT
                        Log.error("do not know how to handle")
                    else:
                        Log.error('Not expecting ES to have a value at "." which {{agg}} can be applied', agg=s.aggregate)
            elif s.aggregate == "count":
                s.value = s.value.map(es_column_map)
                new_select["count_" + literal_field(s.value.var)] += [s]
            else:
                s.value = s.value.map(es_column_map)
                new_select[literal_field(s.value.var)] += [s]
        else:
            formula.append(s)
    for canonical_name, many in new_select.items():
        representative = many[0]
        if representative.value.var == ".":
            Log.error("do not know how to handle")
        else:
            field_name = representative.value.var
        # canonical_name=literal_field(many[0].name)
        for s in many:
            if s.aggregate == "count":
                es_query.aggs[literal_field(canonical_name)].value_count.field = field_name
                s.pull = literal_field(canonical_name) + ".value"
            elif s.aggregate == "median":
                # ES USES DIFFERENT METHOD FOR PERCENTILES
                key = literal_field(canonical_name + " percentile")
                es_query.aggs[key].percentiles.field = field_name
                es_query.aggs[key].percentiles.percents += [50]
                s.pull = key + ".values.50\.0"
            elif s.aggregate == "percentile":
                # ES USES DIFFERENT METHOD FOR PERCENTILES
                key = literal_field(canonical_name + " percentile")
                if isinstance(s.percentile, basestring) or s.percentile < 0 or 1 < s.percentile:
                    Log.error("Expecting percentile to be a float from 0.0 to 1.0")
                percent = Math.round(s.percentile * 100, decimal=6)
                es_query.aggs[key].percentiles.field = field_name
                es_query.aggs[key].percentiles.percents += [percent]
                s.pull = key + ".values." + literal_field(unicode(percent))
            elif s.aggregate == "cardinality":
                # ES USES DIFFERENT METHOD FOR CARDINALITY
                key = literal_field(canonical_name + " cardinality")
                es_query.aggs[key].cardinality.field = field_name
                s.pull = key + ".value"
            elif s.aggregate == "stats":
                # REGULAR STATS
                stats_name = literal_field(canonical_name)
                es_query.aggs[stats_name].extended_stats.field = field_name
                # GET MEDIAN TOO!
                median_name = literal_field(canonical_name + " percentile")
                es_query.aggs[median_name].percentiles.field = field_name
                es_query.aggs[median_name].percentiles.percents += [50]
                s.pull = {
                    "count": stats_name + ".count",
                    "sum": stats_name + ".sum",
                    "min": stats_name + ".min",
                    "max": stats_name + ".max",
                    "avg": stats_name + ".avg",
                    "sos": stats_name + ".sum_of_squares",
                    "std": stats_name + ".std_deviation",
                    "var": stats_name + ".variance",
                    "median": median_name + ".values.50\.0"
                }
            elif s.aggregate == "union":
                # USE TERMS AGGREGATE TO SIMULATE union
                stats_name = literal_field(canonical_name)
                es_query.aggs[stats_name].terms.field = field_name
                es_query.aggs[stats_name].terms.size = Math.min(s.limit, MAX_LIMIT)
                s.pull = stats_name + ".buckets.key"
            else:
                # PULL VALUE OUT OF THE stats AGGREGATE
# ......... part of the code omitted here .........
Example 6: add_instances
# Required import: from mo_math import Math [as alias]
# Or: from mo_math.Math import round [as alias]
def add_instances(self, net_new_utility, remaining_budget):
    prices = self.pricing()
    for p in prices:
        if net_new_utility <= 0 or remaining_budget <= 0:
            break
        if p.current_price == None:
            Log.note("{{type}} has no current price",
                type=p.type.instance_type
            )
            continue
        if self.settings.utility[p.type.instance_type].blacklist or \
                p.availability_zone in listwrap(self.settings.utility[p.type.instance_type].blacklist_zones):
            Log.note("{{type}} in {{zone}} skipped due to blacklist", type=p.type.instance_type, zone=p.availability_zone)
            continue
        # DO NOT BID HIGHER THAN WHAT WE ARE WILLING TO PAY
        max_acceptable_price = p.type.utility * self.settings.max_utility_price + p.type.discount
        max_bid = Math.min(p.higher_price, max_acceptable_price, remaining_budget)
        min_bid = p.price_80
        if min_bid > max_acceptable_price:
            Log.note(
                "Price of ${{price}}/hour on {{type}}: Over remaining acceptable price of ${{remaining}}/hour",
                type=p.type.instance_type,
                price=min_bid,
                remaining=max_acceptable_price
            )
            continue
        elif min_bid > remaining_budget:
            Log.note(
                "Did not bid ${{bid}}/hour on {{type}}: Over budget of ${{remaining_budget}}/hour",
                type=p.type.instance_type,
                bid=min_bid,
                remaining_budget=remaining_budget
            )
            continue
        elif min_bid > max_bid:
            Log.error("not expected")
        naive_number_needed = int(Math.round(float(net_new_utility) / float(p.type.utility), decimal=0))
        limit_total = None
        if self.settings.max_percent_per_type < 1:
            current_count = sum(1 for a in self.active if a.launch_specification.instance_type == p.type.instance_type and a.launch_specification.placement == p.availability_zone)
            all_count = sum(1 for a in self.active if a.launch_specification.placement == p.availability_zone)
            all_count = max(all_count, naive_number_needed)
            limit_total = int(Math.floor((all_count * self.settings.max_percent_per_type - current_count) / (1 - self.settings.max_percent_per_type)))
        num = Math.min(naive_number_needed, limit_total, self.settings.max_requests_per_type)
        if num < 0:
            Log.note(
                "{{type}} is over {{limit|percent}} of instances, no more requested",
                limit=self.settings.max_percent_per_type,
                type=p.type.instance_type
            )
            continue
        elif num == 1:
            min_bid = Math.min(Math.max(p.current_price * 1.1, min_bid), max_acceptable_price)
            price_interval = 0
        else:
            price_interval = Math.min(min_bid / 10, (max_bid - min_bid) / (num - 1))
        for i in range(num):
            bid_per_machine = min_bid + (i * price_interval)
            if bid_per_machine < p.current_price:
                Log.note(
                    "Did not bid ${{bid}}/hour on {{type}}: Under current price of ${{current_price}}/hour",
                    type=p.type.instance_type,
                    bid=bid_per_machine - p.type.discount,
                    current_price=p.current_price
                )
                continue
            if bid_per_machine - p.type.discount > remaining_budget:
                Log.note(
                    "Did not bid ${{bid}}/hour on {{type}}: Over remaining budget of ${{remaining}}/hour",
                    type=p.type.instance_type,
                    bid=bid_per_machine - p.type.discount,
                    remaining=remaining_budget
                )
                continue
            try:
                if self.settings.ec2.request.count == None or self.settings.ec2.request.count != 1:
                    Log.error("Spot Manager can only request machine one-at-a-time")
                new_requests = self._request_spot_instances(
                    price=bid_per_machine,
                    availability_zone_group=p.availability_zone,
                    instance_type=p.type.instance_type,
                    kwargs=copy(self.settings.ec2.request)
                )
                Log.note(
                    "Request {{num}} instance {{type}} in {{zone}} with utility {{utility}} at ${{price}}/hour",
                    num=len(new_requests),
                    type=p.type.instance_type,
                    zone=p.availability_zone,
                    utility=p.type.utility,
                    price=bid_per_machine
# ......... part of the code omitted here .........
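One detail worth noting above: int(Math.round(float(net_new_utility) / float(p.type.utility), decimal=0)) rounds the utility deficit to the nearest whole machine before truncating to int, rather than always rounding down as plain integer division would. A worked sketch with illustrative numbers (not taken from the source):

net_new_utility = 23          # utility still needed
utility_per_instance = 10     # utility provided by this instance type
naive_number_needed = int(Math.round(float(net_new_utility) / float(utility_per_instance), decimal=0))
# 23 / 10 = 2.3, which rounds to 2, so two instances are requested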
Example 7: request
# Required import: from mo_math import Math [as alias]
# Or: from mo_math.Math import round [as alias]
def request(method, url, headers=None, zip=None, retry=None, **kwargs):
    """
    JUST LIKE requests.request() BUT WITH DEFAULT HEADERS AND FIXES
    DEMANDS data IS ONE OF:
    * A JSON-SERIALIZABLE STRUCTURE, OR
    * LIST OF JSON-SERIALIZABLE STRUCTURES, OR
    * None
    Parameters
    * zip - ZIP THE REQUEST BODY, IF BIG ENOUGH
    * json - JSON-SERIALIZABLE STRUCTURE
    * retry - {"times": x, "sleep": y} STRUCTURE
    THE BYTE_STRINGS (b"") ARE NECESSARY TO PREVENT httplib.py FROM **FREAKING OUT**
    IT APPEARS requests AND httplib.py SIMPLY CONCATENATE STRINGS BLINDLY, WHICH
    INCLUDES url AND headers
    """
    global _warning_sent
    global request_count
    if not _warning_sent and not default_headers:
        Log.warning(text_type(
            "The pyLibrary.env.http module was meant to add extra " +
            "default headers to all requests, specifically the 'Referer' " +
            "header with a URL to the project. Use the `pyLibrary.debug.constants.set()` " +
            "function to set `pyLibrary.env.http.default_headers`"
        ))
        _warning_sent = True
    if isinstance(url, list):
        # TRY MANY URLS
        failures = []
        for remaining, u in jx.countdown(url):
            try:
                response = request(method, u, retry=retry, **kwargs)
                if Math.round(response.status_code, decimal=-2) not in [400, 500]:
                    return response
                if not remaining:
                    return response
            except Exception as e:
                e = Except.wrap(e)
                failures.append(e)
        Log.error(u"Tried {{num}} urls", num=len(url), cause=failures)
    if 'session' in kwargs:
        session = kwargs['session']
        del kwargs['session']
        sess = Null
    else:
        sess = session = sessions.Session()
    with closing(sess):
        if PY2 and isinstance(url, text_type):
            # httplib.py WILL **FREAK OUT** IF IT SEES ANY UNICODE
            url = url.encode('ascii')
        try:
            set_default(kwargs, {"zip": zip, "retry": retry}, DEFAULTS)
            _to_ascii_dict(kwargs)
            # HEADERS
            headers = kwargs['headers'] = unwrap(set_default(headers, session.headers, default_headers))
            _to_ascii_dict(headers)
            del kwargs['headers']
            # RETRY
            retry = wrap(kwargs['retry'])
            if isinstance(retry, Number):
                retry = set_default({"times": retry}, DEFAULTS['retry'])
            if isinstance(retry.sleep, Duration):
                retry.sleep = retry.sleep.seconds
            del kwargs['retry']
            # JSON
            if 'json' in kwargs:
                kwargs['data'] = value2json(kwargs['json']).encode('utf8')
                del kwargs['json']
            # ZIP
            set_default(headers, {'Accept-Encoding': 'compress, gzip'})
            if kwargs['zip'] and len(coalesce(kwargs.get('data'))) > 1000:
                compressed = convert.bytes2zip(kwargs['data'])
                headers['content-encoding'] = 'gzip'
                kwargs['data'] = compressed
            del kwargs['zip']
        except Exception as e:
            Log.error(u"Request setup failure on {{url}}", url=url, cause=e)
        errors = []
        for r in range(retry.times):
            if r:
                Till(seconds=retry.sleep).wait()
            try:
                DEBUG and Log.note(u"http {{method|upper}} to {{url}}", method=method, url=text_type(url))
                request_count += 1
                return session.request(method=method, headers=headers, url=str(url), **kwargs)
            except Exception as e:
                e = Except.wrap(e)
# ......... part of the code omitted here .........
Example 8: request
# Required import: from mo_math import Math [as alias]
# Or: from mo_math.Math import round [as alias]
def request(method, url, zip=None, retry=None, **kwargs):
    """
    JUST LIKE requests.request() BUT WITH DEFAULT HEADERS AND FIXES
    DEMANDS data IS ONE OF:
    * A JSON-SERIALIZABLE STRUCTURE, OR
    * LIST OF JSON-SERIALIZABLE STRUCTURES, OR
    * None
    Parameters
    * zip - ZIP THE REQUEST BODY, IF BIG ENOUGH
    * json - JSON-SERIALIZABLE STRUCTURE
    * retry - {"times": x, "sleep": y} STRUCTURE
    THE BYTE_STRINGS (b"") ARE NECESSARY TO PREVENT httplib.py FROM **FREAKING OUT**
    IT APPEARS requests AND httplib.py SIMPLY CONCATENATE STRINGS BLINDLY, WHICH
    INCLUDES url AND headers
    """
    global _warning_sent
    if not default_headers and not _warning_sent:
        _warning_sent = True
        Log.warning(
            "The pyLibrary.env.http module was meant to add extra "
            "default headers to all requests, specifically the 'Referer' "
            "header with a URL to the project. Use the `pyLibrary.debug.constants.set()` "
            "function to set `pyLibrary.env.http.default_headers`"
        )
    if isinstance(url, list):
        # TRY MANY URLS
        failures = []
        for remaining, u in jx.countdown(url):
            try:
                response = request(method, u, zip=zip, retry=retry, **kwargs)
                if Math.round(response.status_code, decimal=-2) not in [400, 500]:
                    return response
                if not remaining:
                    return response
            except Exception as e:
                e = Except.wrap(e)
                failures.append(e)
        Log.error("Tried {{num}} urls", num=len(url), cause=failures)
    if b"session" in kwargs:
        session = kwargs[b"session"]
        del kwargs[b"session"]
    else:
        session = sessions.Session()
    session.headers.update(default_headers)
    if zip is None:
        zip = ZIP_REQUEST
    if isinstance(url, unicode):
        # httplib.py WILL **FREAK OUT** IF IT SEES ANY UNICODE
        url = url.encode("ascii")
    _to_ascii_dict(kwargs)
    timeout = kwargs[b'timeout'] = coalesce(kwargs.get(b'timeout'), default_timeout)
    if retry == None:
        retry = Data(times=1, sleep=0)
    elif isinstance(retry, Number):
        retry = Data(times=retry, sleep=1)
    else:
        retry = wrap(retry)
        if isinstance(retry.sleep, Duration):
            retry.sleep = retry.sleep.seconds
        set_default(retry, {"times": 1, "sleep": 0})
    if b'json' in kwargs:
        kwargs[b'data'] = convert.value2json(kwargs[b'json']).encode("utf8")
        del kwargs[b'json']
    try:
        headers = kwargs[b"headers"] = unwrap(coalesce(wrap(kwargs)[b"headers"], {}))
        set_default(headers, {b"accept-encoding": b"compress, gzip"})
        if zip and len(coalesce(kwargs.get(b"data"))) > 1000:
            compressed = convert.bytes2zip(kwargs[b"data"])
            headers[b'content-encoding'] = b'gzip'
            kwargs[b"data"] = compressed
            _to_ascii_dict(headers)
        else:
            _to_ascii_dict(headers)
    except Exception as e:
        Log.error("Request setup failure on {{url}}", url=url, cause=e)
    errors = []
    for r in range(retry.times):
        if r:
            Till(seconds=retry.sleep).wait()
        try:
            if DEBUG:
                Log.note("http {{method}} to {{url}}", method=method, url=url)
            return session.request(method=method, url=url, **kwargs)
        except Exception as e:
            errors.append(Except.wrap(e))
# ......... part of the code omitted here .........