本文整理汇总了Python中all函数的典型用法代码示例。如果您正苦于以下问题:Python all函数的具体用法?Python all怎么用?Python all使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了all函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_with_shift_that_spans_previous_midnight
def test_with_shift_that_spans_previous_midnight():
    """
    0 1 2 3 4 5 6 7 8 9 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2
                        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
    *-*-*-**]
            [*-*-*-*-*]
                      [*-*-*-*-*]
                                [*-*-*-*-*]
    """
    shifts = []
    # 5 hour shifts, staggered 5 hours apart.  The first one starts at 23:00
    # yesterday, so only its last 4 hours fall inside today's table.
    # (Original comment said "3 hour shifts, staggered 2 hours apart", which
    # contradicted the 5 * HOUR durations and 5-hour start offsets below.)
    shifts.append(ShiftDict(start_time=yesterday_at_hour(23), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(4), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(9), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(14), shift_minutes=5 * HOUR))
    data = shifts_to_tabular_data(_wrap_in_lists(shifts), datetime.date.today())
    assert get_num_columns(data) == ONE_DAY
    assert_columns_all_at_correct_location(data)
    assert len(data) == 5 * HOUR + 4
    # First cell is the truncated overnight shift covering hours 0-4 of today.
    assert data[0]['columns'] == 4 * HOUR
    # all() consumes any iterable; the redundant list() wrappers are removed.
    assert all(c['columns'] == 5 * HOUR for c in data[1:4])
    assert all(c['columns'] == 1 for c in data[4:])
示例2: invest
def invest(positions, num_trials):
    '''Return a DataFrame of percentage returns of the form
    [trial(i), position(j)], where entry [i, j] represents the i'th
    independent trial of buying 'j' investments of value 1000/'j' in an
    instrument that returns double 51% and zero 49% of the time.

    Args:
        positions: list of whole numbers in (0, 1000], each the number of
            investments to split the $1000 across.
        num_trials: positive number of independent trials to simulate.

    Raises:
        NotListError: positions is not a list.
        NotNumError: positions contains a non-numeric entry.
        NotIntError: positions contains a non-whole number.
        InvalidPosError: a position is outside (0, 1000].
        TrialNotNumError: num_trials is not numeric.
        TrialNegError: num_trials is not positive.
    '''
    # Exception handling (positions argument).  The exact type() comparisons
    # are kept deliberately: unlike isinstance(), they reject bool and other
    # int/float subclasses exactly as the original validation did.
    if type(positions) != list:
        raise NotListError
    # 'not all(...)' replaces the '== False' anti-pattern; generator
    # expressions avoid materializing throwaway lists.
    if not all(type(x) == int or type(x) == float for x in positions):
        raise NotNumError
    if not all(x % 1 == 0.0 for x in positions):
        raise NotIntError
    if not all(0 < x <= 1000 for x in positions):
        raise InvalidPosError
    # Exception handling (num_trials argument).
    if type(num_trials) != int and type(num_trials) != float:
        raise TrialNotNumError
    if num_trials <= 0:
        raise TrialNegError
    # Simulation: one column per position size, one row per trial.
    position_value = 1000 / np.array(positions)
    cumu_ret = DataFrame(columns=positions, index=np.arange(1, num_trials + 1))
    for value in position_value:
        col = 1000 / value
        cumu_ret[col] = col
        # calcCumRet maps an investment size to a simulated cumulative return.
        cumu_ret[col] = cumu_ret[col].map(calcCumRet)
    daily_ret = (cumu_ret / 1000) - 1
    return daily_ret
示例3: test_history
def test_history(self, manager, my_vcr):
    """Every history entry is a PipelineInstance for the requested pipeline."""
    with my_vcr.use_cassette("pipeline/history_Consumer_Website"):
        pipeline_name = "Consumer_Website"
        instances = manager.history(pipeline_name)
        assert all(isinstance(entry, pipeline.PipelineInstance) for entry in instances)
        assert all(entry.data.name == pipeline_name for entry in instances)
示例4: add_full_barcode_adapter_sets
def add_full_barcode_adapter_sets(matching_sets):
    """
    This function adds some new 'full' adapter sequences based on what was
    already found.  For example, if the ligation adapters and the reverse
    barcode adapters are found, it assumes we are looking at a native
    barcoding run and so it adds the complete native barcoding adapter
    sequences (with the barcode's upstream and downstream context included).
    """
    # Snapshot of the names present before anything is appended, so newly
    # added full sequences do not influence later membership tests.
    found_names = set(entry.name for entry in matching_sets)
    for barcode_num in range(1, 97):
        reverse_name = 'Barcode ' + str(barcode_num) + ' (reverse)'
        forward_name = 'Barcode ' + str(barcode_num) + ' (forward)'
        # Native barcode full sequences
        if 'SQK-NSK007' in found_names and reverse_name in found_names:
            matching_sets.append(make_full_native_barcode_adapter(barcode_num))
        # Rapid barcode full sequences
        if ('SQK-NSK007' in found_names and 'Rapid' in found_names
                and forward_name in found_names):
            matching_sets.append(make_full_rapid_barcode_adapter(barcode_num))
        # Added for test
        # PCR barcode full sequences
        if 'PCR' in found_names and forward_name in found_names:
            matching_sets.append(make_full_PCR_barcode_adapter(barcode_num))
    return matching_sets
示例5: test_multiple_problems
def test_multiple_problems(self):
    if MPI:
        # Give each rank its own communicator so every process runs an
        # independent instance of the Problem.
        rank_comm = self.comm.Split(self.comm.rank)
        problem = Problem(Group(), impl=impl, comm=rank_comm)
        n = 5
        rank_value = self.comm.rank + 1
        start_values = np.ones(n) * rank_value
        problem.root.add('A1', IndepVarComp('x', start_values))
        problem.root.add('C1', ABCDArrayComp(n))
        problem.root.connect('A1.x', 'C1.a')
        problem.root.connect('A1.x', 'C1.b')
        problem.setup(check=False)
        problem.run()
        # Check the first output array and keep it for the gather below.
        self.assertTrue(all(problem['C1.c'] == np.ones(n) * (rank_value * 2)))
        local_result = problem['C1.c']
        # Gather the results from the separate processes/problems and check
        # each rank's array against its expected value.
        all_results = self.comm.allgather(local_result)
        self.assertEqual(len(all_results), self.comm.size)
        for rank in range(self.comm.size):
            self.assertTrue(all(all_results[rank] == np.ones(n) * 2 * (rank + 1)))
示例6: test_add_strategy_with_setitem
def test_add_strategy_with_setitem(self):
    """Strategies added via __setitem__ behave like attributes and keys."""
    strategies = StrategyDict("sdict")
    strategies["add"] = operator.add
    strategies["mul"] = operator.mul
    strategies["+"] = operator.add
    # "+" aliases "add", so there are still only two strategies.
    assert len(strategies) == 2
    assert set(strategies.keys()) == {("add", "+"), ("mul",)}
    for attr_name in {"add", "+", "mul"}:
        assert attr_name in dir(strategies)
        assert attr_name in vars(strategies)
    assert strategies.add(2, 3) == 5 == strategies["add"](2, 3)
    assert strategies.mul(2, 3) == 6 == strategies["mul"](2, 3)
    assert strategies(7, 8) == 15 == strategies.default(7, 8)
    del strategies["+"]
    assert len(strategies) == 2
    del strategies.add
    assert len(strategies) == 1
    # With the default strategy gone, calling falls back to NotImplemented.
    assert strategies(7, 8) == NotImplemented == strategies.default(7, 8)
    strategies["pow"] = operator.pow
    assert len(strategies) == 2
    assert strategies(2, 3) == 8 == strategies.default(2, 3)
    assert strategies.pow(5, 2) == 25 == strategies["pow"](5, 2)
示例7: _parse_table_name
def _parse_table_name(self, table_id):
"""Parse a table name in the form of appid_YYYY_MM or
YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
Args:
table_id: The table id as listed by BigQuery.
Returns:
Tuple containing year/month and app id. Returns None, None if the
table id cannot be parsed.
"""
# Prefix date
attributes = table_id.split('_')
year_month = "-".join(attributes[:2])
app_id = "-".join(attributes[2:])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]):
return year_month, app_id
# Postfix date
attributes = table_id.split('_')
year_month = "-".join(attributes[-2:])
app_id = "-".join(attributes[:-2])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]):
return year_month, app_id
return None, None
示例8: from_list
def from_list(index, queues):
    """Create a queue using the queue reference from `queues[index]`.

    Args:
      index: An integer scalar tensor that determines the input that gets
        selected.
      queues: A list of `QueueBase` objects.

    Returns:
      A `QueueBase` object.

    Raises:
      TypeError: When `queues` is not a list of `QueueBase` objects,
        or when the data types of `queues` are not all the same.
    """
    if ((not queues) or
            (not isinstance(queues, list)) or
            (not all(isinstance(x, QueueBase) for x in queues))):
        raise TypeError("A list of queues expected")
    dtypes = queues[0].dtypes
    # Generator expression instead of a materialized list: all() can
    # short-circuit on the first dtype mismatch.
    if not all(dtypes == q.dtypes for q in queues[1:]):
        raise TypeError("Queues do not have matching component dtypes.")
    queue_refs = [x.queue_ref for x in queues]
    selected_queue = control_flow_ops.ref_select(index, queue_refs)
    # TODO(josh11b): Unify the shapes of the queues too?
    return QueueBase(dtypes=dtypes, shapes=None, queue_ref=selected_queue)
示例9: check_grade
def check_grade(self, test_output, truth_output, test_input):
    """Check that test_output reports the same grade as truth_output.

    The expected answer ('A'-'D' or 'failed') is extracted from
    truth_output; test_output passes only when it contains the matching
    message and none of the messages for the other grades.

    Args:
        test_output: Output produced by the program under test.
        truth_output: Reference output containing the correct answer.
        test_input: Unused here; presumably kept for interface
            compatibility with sibling check_* methods — TODO confirm.

    Returns:
        bool: True when test_output matches the expected answer exactly.

    Raises:
        Exception: If no answer can be found in an output.
        ValueError: If the extracted truth answer is not a known grade.
    """
    def get_ans(output):
        # Look for a letter grade first, then for the failure message.
        pat = re.compile(r'Student has an ([A-D]) grade', re.IGNORECASE)
        m = pat.search(output)
        if m is None:
            p2 = re.compile(r'Student has (failed) the course', re.IGNORECASE)
            m2 = p2.search(output)
            if m2 is None:
                raise Exception("GRADE: no answer found")
            return m2.group(1)
        return m.group(1)

    truth_answer = get_ans(truth_output)
    grades = set(['A', 'B', 'C', 'D'])
    grade_patterns = {g: re.compile("Student has an {} grade".format(g),
                                    re.IGNORECASE)
                      for g in grades}
    if truth_answer in grades:
        # No message for any *other* grade may appear; the generator lets
        # all() short-circuit (the original built throwaway lists).
        no_neg_match = all(rgx.search(test_output) is None
                           for g, rgx in grade_patterns.items()
                           if g != truth_answer)
        pos_match = grade_patterns[truth_answer].search(test_output)
    elif truth_answer == 'failed':
        failure_rgx = re.compile("Student has failed the course",
                                 re.IGNORECASE)
        # Only the values are needed here, not the (grade, pattern) pairs.
        no_neg_match = all(rgx.search(test_output) is None
                           for rgx in grade_patterns.values())
        pos_match = failure_rgx.search(test_output)
    else:
        raise ValueError("Unknown grades truth {}".format(truth_answer))
    return pos_match is not None and no_neg_match
示例10: compilable
def compilable(cls, clf):
    """
    Verifies that the given fitted model is eligible to be compiled.
    Returns True if the model is eligible, and False otherwise.

    Parameters
    ----------
    clf:
        A fitted regression tree/ensemble.
    """
    # TODO - is there an established way to check `is_fitted``?
    if isinstance(clf, DecisionTreeRegressor):
        # A single tree: one output, one class, and a built tree structure.
        return (clf.n_outputs_ == 1 and clf.n_classes_ == 1
                and clf.tree_ is not None)
    if isinstance(clf, GradientBoostingRegressor):
        # Non-empty ensemble where every member tree is itself compilable.
        members = clf.estimators_
        return members.size and all(cls.compilable(member)
                                    for member in members.flat)
    if isinstance(clf, ForestRegressor):
        members = np.asarray(clf.estimators_)
        return members.size and all(cls.compilable(member)
                                    for member in members.flat)
    return False
示例11: sanitize_indices
def sanitize_indices(indices):
    """Check and possibly sanitize indices.

    Parameters
    ----------
    indices : int, slice, or sequence of ints and slices
        If an int or slice is passed in, it is converted to a
        1-tuple.

    Returns
    -------
    2-tuple
        ('point', indices) if all `indices` are ints, or
        ('view', indices) if some `indices` are slices.

    Raises
    ------
    TypeError
        If `indices` is not all ints or slices.
    """
    # isinstance accepts a tuple of types; the chained 'or' forms collapse.
    if isinstance(indices, (int, slice)):
        # Normalize a bare scalar/slice to a 1-tuple and re-check.
        return sanitize_indices((indices,))
    if all(isinstance(i, int) for i in indices):
        return 'point', indices
    if all(isinstance(i, (int, slice)) for i in indices):
        return 'view', indices
    raise TypeError("Index must be a sequence of ints and slices")
示例12: _non_dominated_front_old
def _non_dominated_front_old(iterable, key=lambda x: x, allowequality=True):
    """Return a subset of items from iterable which are not dominated by any
    other item in iterable.

    Python 2 only: relies on dict.values() returning a list (indexed below)
    and on xrange.

    Parameters:
        iterable: Items to filter; key(item) must yield a fixed-size tuple,
            and items must be hashable (they are used as dict keys).
        key: Maps an item to the objective tuple that is compared.
        allowequality: Chooses strict '<' vs. non-strict '<=' per-dimension
            comparison when building the domination relation.
            NOTE(review): the branches look inverted -- allowequality=True
            selects the strict '<' test, which actually *disallows* ties in
            any dimension; confirm intent against callers before changing.
    """
    items = list(iterable)
    # Cache each item's key tuple; every pairwise comparison reuses them.
    keys = dict((i, key(i)) for i in items)
    dim = len(keys.values()[0])
    if any(dim != len(k) for k in keys.values()):
        raise ValueError("Wrong tuple size.")
    # Make a dictionary that holds the items another item dominates.
    dominations = collections.defaultdict(lambda: [])
    for i in items:
        for j in items:
            if allowequality:
                # i dominates j when i's key is smaller in every dimension.
                if all(keys[i][k] < keys[j][k] for k in xrange(dim)):
                    dominations[i].append(j)
            else:
                if all(keys[i][k] <= keys[j][k] for k in xrange(dim)):
                    dominations[i].append(j)
    dominates = lambda i, j: j in dominations[i]
    res = set()
    items = set(items)
    # Incrementally maintain the front: drop i if anything present dominates
    # it, otherwise evict everything i dominates.
    for i in items:
        res.add(i)
        for j in list(res):
            if i is j:
                continue
            if dominates(j, i):
                res.remove(i)
                break
            elif dominates(i, j):
                res.remove(j)
    return res
示例13: test_default_instance_initialize
def test_default_instance_initialize():
    """
    Testing the default _instance_initialize provided by module.
    """
    class M1(Module):
        def __init__(self):
            super(M1, self).__init__()
            self.a = T.dscalar()
            self.b = T.lscalar()
            self.c = T.lvector()

    class M2(Module):
        def __init__(self):
            super(M2, self).__init__()
            self.a = T.lscalar()
            self.x = M1()
            self.y = self.x
            self.z = M1()

    inst = M2().make(a=13,
                     x={'a': 1, 'b': 2, 'c': [3, 4]},
                     z={'a': 5, 'b': 6, 'c': [7, 8]})
    assert inst.a == 13
    # y aliases x, so it must carry x's values; vector fields need all()
    # because == on them compares elementwise.
    expectations = ((inst.x, 1, 2, [3, 4]),
                    (inst.y, 1, 2, [3, 4]),
                    (inst.z, 5, 6, [7, 8]))
    for submodule, exp_a, exp_b, exp_c in expectations:
        assert submodule.a == exp_a
        assert submodule.b == exp_b
        assert all(submodule.c == exp_c)
示例14: test_with_shift_that_spans_upcoming_midnight
def test_with_shift_that_spans_upcoming_midnight():
    """
    0 1 2 3 4 5 6 7 8 9 1 1 1 1 1 1 1 1 1 1 2 2 2 2 2
                        0 1 2 3 4 5 6 7 8 9 0 1 2 3 4
              [*-*-*-*-*]
                        [*-*-*-*-*]
                                  [*-*-*-*-*]
                                            [*-*-*-
    """
    shifts = []
    # 5 hour shifts, staggered 5 hours apart.  The last one starts at 20:00
    # and runs past midnight, so only its first 4 hours fall inside today's
    # table.  (Original comment said "3 hour shifts, staggered 2 hours
    # apart", contradicting the 5 * HOUR durations and offsets below.)
    shifts.append(ShiftDict(start_time=today_at_hour(5), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(10), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(15), shift_minutes=5 * HOUR))
    shifts.append(ShiftDict(start_time=today_at_hour(20), shift_minutes=5 * HOUR))
    data = shifts_to_tabular_data(_wrap_in_lists(shifts), datetime.date.today())
    assert get_num_columns(data) == ONE_DAY
    assert_columns_all_at_correct_location(data)
    assert len(data) == 5 * HOUR + 4
    # Hours 0-4 are empty single-column filler cells, then the three full
    # 5-hour shifts, then the truncated final shift.
    assert all(c['columns'] == 1 for c in data[:5 * HOUR])
    assert all(c['columns'] == 5 * HOUR for c in data[5 * HOUR:5 * HOUR + 3])
    assert data[-1]['columns'] == 4 * HOUR
示例15: test_http_pool_key_fields
def test_http_pool_key_fields(self):
    """Assert the HTTPPoolKey fields are honored when selecting a pool."""
    key_fields = {
        'timeout': timeout.Timeout(3.14),
        'retries': retry.Retry(total=6, connect=2),
        'block': True,
        'strict': True,
        'source_address': '127.0.0.1',
    }
    manager = PoolManager()
    pools = [
        manager.connection_from_url('http://example.com/'),
        manager.connection_from_url('http://example.com:8000/'),
        manager.connection_from_url('http://other.example.com/'),
    ]
    # Changing any single key field must cause a brand-new pool to be built
    # for the same URL.
    for field_name, field_value in key_fields.items():
        manager.connection_pool_kw[field_name] = field_value
        pools.append(manager.connection_from_url('http://example.com/'))
    # Every pool obtained above must be a distinct object.
    for left_index, left_pool in enumerate(pools):
        for right_index, right_pool in enumerate(pools):
            if left_index != right_index:
                assert left_pool is not right_pool
    assert all(isinstance(pool_key, PoolKey) for pool_key in manager.pools.keys())