This article collects typical usage examples of more_itertools.collapse in Python. If you have been wondering what more_itertools.collapse does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples for the module it belongs to, more_itertools.
The section below shows 11 code examples of more_itertools.collapse, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
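Before diving into the examples, here is a minimal sketch of what collapse does (toy data, not taken from any of the examples below): it flattens arbitrarily nested iterables into a single flat iterator, treats strings and bytes as atomic values, and can be limited with the levels and base_type parameters.

from more_itertools import collapse

nested = [1, [2, (3, 4)], [[5]]]
print(list(collapse(nested)))            # [1, 2, 3, 4, 5]
print(list(collapse(["ab", ["cd"]])))    # ['ab', 'cd'] -- strings are not split into characters
print(list(collapse(nested, levels=1)))  # [1, 2, (3, 4), [5]] -- only one level is flattened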
Example 1: _write_report_lines_from_hooks
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def _write_report_lines_from_hooks(self, lines):
    lines.reverse()
    for line in collapse(lines):
        self.write_line(line)
Example 2: test_collapse
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def test_collapse(self):
    l = [[1], 2, [[3], 4], [[[5]]]]
    self.assertEqual(list(mi.collapse(l)), [1, 2, 3, 4, 5])
Example 3: test_collapse_to_string
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def test_collapse_to_string(self):
    l = [["s1"], "s2", [["s3"], "s4"], [[["s5"]]]]
    self.assertEqual(list(mi.collapse(l)), ["s1", "s2", "s3", "s4", "s5"])
Example 4: test_collapse_to_bytes
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def test_collapse_to_bytes(self):
    l = [[b"s1"], b"s2", [[b"s3"], b"s4"], [[[b"s5"]]]]
    self.assertEqual(
        list(mi.collapse(l)), [b"s1", b"s2", b"s3", b"s4", b"s5"]
    )
Example 5: test_collapse_flatten
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def test_collapse_flatten(self):
    l = [[1], [2], [[3], 4], [[[5]]]]
    self.assertEqual(list(mi.collapse(l, levels=1)), list(mi.flatten(l)))
Example 6: test_collapse_to_list
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def test_collapse_to_list(self):
    l = (1, [2], (3, [4, (5,)], 'ab'))
    actual = list(mi.collapse(l, base_type=list))
    expected = [1, [2], 3, [4, (5,)], 'ab']
    self.assertEqual(actual, expected)
Example 7: test_collapse_to_level
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def test_collapse_to_level(self):
    l = [[1], 2, [[3], 4], [[[5]]]]
    self.assertEqual(list(mi.collapse(l, levels=2)), [1, 2, 3, 4, [5]])
    self.assertEqual(
        list(mi.collapse(mi.collapse(l, levels=1), levels=1)),
        list(mi.collapse(l, levels=2))
    )
Example 8: confusion_matrix
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def confusion_matrix(hat_y, y, n_classes=None):
    # flatten possibly nested predictions and labels into 1-D arrays
    hat_y = np.array(list(collapse(hat_y)))
    y = np.array(list(collapse(y)))
    if n_classes is None:
        classes = np.unique(np.union1d(hat_y, y))
        n_classes = len(classes)
    cnfm = np.zeros((n_classes, n_classes))
    # rows are true classes, columns are predicted classes
    for j in range(y.shape[0]):
        cnfm[y[j], hat_y[j]] += 1
    return cnfm
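A small usage sketch for the function above (toy labels invented for illustration; assumes integer class ids starting at 0, and that np and collapse are imported as in the snippet). Because both arguments are passed through collapse, predictions and labels may be given as flat or arbitrarily nested sequences.

hat_y = [[0, 1], [1, 2]]   # predicted classes, grouped per batch (hypothetical data)
y = [0, [1, 1, 1]]         # true classes, nested differently
print(confusion_matrix(hat_y, y))
# [[1. 0. 0.]
#  [0. 2. 1.]
#  [0. 0. 0.]]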
Example 9: _write_report_lines_from_hooks
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def _write_report_lines_from_hooks(
    self, lines: List[Union[str, List[str]]]
) -> None:
    lines.reverse()
    for line in collapse(lines):
        self.write_line(line)
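A quick illustration of why collapse is used here (toy values; assumes each hook may contribute either a single string or a list of strings):

from more_itertools import collapse

lines = ["line from the last hook", ["first hook, line 1", "first hook, line 2"]]
lines.reverse()
print(list(collapse(lines)))
# ['first hook, line 1', 'first hook, line 2', 'line from the last hook']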
Example 10: _index
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def _index(self, xs):
    """
    desc: apply index to text data and persist unique vocabulary in dataset to pickle file
    args:
        xs: text data
    returns:
        list of test, train data after it was indexed, the lookup table for the vocabulary,
        and any persisted variables that may be needed
    """
    def _apply_index(txt_data):
        indexed = [[[unqVoc_LookUp[char] for char in seq] for seq in doc] for doc in txt_data]
        return indexed
    # end

    x_train, x_test = xs
    # create a lookup table for all unique vocabulary in the test and train datasets
    unqVoc = set(list(more_itertools.collapse(x_train[:] + x_test[:])))
    unqVoc_LookUp = {k: v + 1 for v, k in enumerate(unqVoc)}
    vocab_size = len(list(unqVoc_LookUp))
    x_train = _apply_index(txt_data=x_train)
    x_test = _apply_index(txt_data=x_test)
    # determine maximum sequence lengths
    max_seq_len = max([len(seq) for seq in itertools.chain.from_iterable(x_train + x_test)])  # max length of a sequence across all documents
    max_sent_len = max([len(sent) for sent in (x_train + x_test)])  # max number of sequences in a document
    persisted_vars = {"max_seq_len": max_seq_len,
                      "max_sent_len": max_sent_len,
                      "vocab_size": vocab_size}
    return [x_train, x_test, unqVoc_LookUp, persisted_vars]
# end
Author: mguarin0 | Project: HierarchicalAttentionNetworksForDocumentClassification | Lines of code: 36 | Source file: dataProcessing.py
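A rough sketch of the indexing idea above on toy data (variable names here are illustrative, not from the original project): collapse gathers every character across all documents into a single vocabulary set, which is then used to map characters to integer ids.

import more_itertools

x_train = [[["a", "b"], ["c"]]]   # hypothetical corpus: documents -> sequences -> characters
x_test = [[["b", "d"]]]

unq_voc = set(more_itertools.collapse(x_train + x_test))
lookup = {ch: i + 1 for i, ch in enumerate(sorted(unq_voc))}   # 0 left free, e.g. for padding
indexed = [[[lookup[ch] for ch in seq] for seq in doc] for doc in x_train]
print(lookup)    # {'a': 1, 'b': 2, 'c': 3, 'd': 4}
print(indexed)   # [[[1, 2], [3]]]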
Example 11: test_get_files
# Required import: import more_itertools [as alias]
# Or: from more_itertools import collapse [as alias]
def test_get_files(self):
    project = ProjectFactory()
    files = list(get_files(project))
    self.assertTrue(all(issubclass(f.__class__, File) for f in files))
    fields = project.submission.form_data.values()
    fields = collapse(fields, base_type=StreamFieldFile)
    fields = [f for f in fields if isinstance(f, StreamFieldFile)]
    self.assertEqual(len(files), len(fields))
    for f in files:
        self.assertIn(f, fields)