This page collects typical usage examples of the util.flatten function in Python. If you are wondering how the flatten function is used in practice, or what it can be used for, the curated examples below should help.
The following presents 15 code examples of the flatten function, drawn from different open-source projects and ordered by popularity.
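None of the examples shows util.flatten itself: each project ships its own version, and the usages below imply they differ (some flatten arbitrarily deep, some only one level, some return iterators rather than lists). As a point of reference, here is a minimal sketch of a deep flatten consistent with the tests in Example 5 below; it is an assumption, not any single project's implementation:

def flatten(items):
    """Return a flat list of the leaves of arbitrarily nested lists/tuples."""
    flat = []
    for item in items:
        if isinstance(item, (list, tuple)):
            flat.extend(flatten(item))  # recurse into nested containers
        else:
            flat.append(item)  # leaves, including strings, stay intact
    return flat

The short demos added after some of the examples assume this sketch is in scope unless noted otherwise.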
Example 1: reduce_anno_s
# partial comes from the standard library; C (AST constants) and util are
# assumed to be this project's own modules.
from functools import partial

def reduce_anno_s(tmpl, cls, mtd, s):
curried_e = partial(reduce_anno_e, tmpl, cls, mtd)
curried_s = partial(reduce_anno_s, tmpl, cls, mtd)
if s.kind in [C.S.EXP, C.S.ASSERT, C.S.RETURN]:
red_e = curried_e(s.e)
if type(red_e) is list: return red_e
else: s.e = red_e
elif s.kind == C.S.ASSIGN:
s.le = curried_e(s.le)
s.re = curried_e(s.re)
elif s.kind == C.S.IF:
s.e = curried_e(s.e)
s.t = util.flatten(map(curried_s, s.t))
s.f = util.flatten(map(curried_s, s.f))
elif s.kind in [C.S.WHILE, C.S.REPEAT]:
s.e = curried_e(s.e)
s.b = util.flatten(map(curried_s, s.b))
elif s.kind == C.S.FOR:
s.i = curried_e(s.i)
s.init = curried_e(s.init)
s.b = util.flatten(map(curried_s, s.b))
return [s]
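Each reducer returns a list of statements, so one annotated statement can expand into several; flatten(map(curried_s, ...)) then splices those per-statement lists back into a single flat body. A toy illustration of the pattern, with strings standing in for statement nodes and using the flatten sketch above:

body = ["s1", "s2"]
rewrite = lambda s: [s + "_a", s + "_b"] if s == "s2" else [s]
print(flatten(map(rewrite, body)))  # -> ['s1', 's2_a', 's2_b']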
Example 2: some_stats
import itertools

def some_stats(corpus, docid, termv):
doc = corpus.get_doc(docid)
print(docid, "vocabulary {}:{} == {:.2}".format(doc.unique_len, doc.total_len, doc.unique_len / doc.total_len))
# exclude terms which appear only in one document (names, twitter handles)
termv = list(filter(lambda t: corpus.get_term(t).document_frequency > 1, termv))
    # helper: return the top 10 (score, term) pairs for a given key function
bykeyfun = lambda kf: sorted(zip(map(kf, termv), termv), reverse=True)[:10]
# key functions
tf = lambda stem: corpus.get_term(stem).term_frequency(docid)
idf = lambda stem: corpus.get_term(stem).inverse_document_frequency
tfidf = lambda stem: tf(stem) * idf(stem)
# table spec
cols = sorted(
{
"tf": tf,
"idf": idf,
"df": lambda stem: corpus.get_term(stem).document_frequency,
"cf": lambda stem: corpus.get_term(stem).corpus_frequency,
"tf*idf": tfidf,
}.items()
)
# line format, heading
hfmt = " | ".join(len(cols) * ["{:>6} {:<16}"])
fmt = " | ".join(len(cols) * ["{:>6.4g} {!s:<16.16}"])
print(hfmt.format(*util.flatten(zip([name for name, _ in cols], itertools.repeat("term")))))
# data
coldata = [bykeyfun(kf) for _, kf in cols]
for row in zip(*coldata):
print(fmt.format(*util.flatten(row)))
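The header line above relies on a small trick: flattening zip(names, itertools.repeat("term")) interleaves every column name with the literal "term", mirroring the (score, term) pairs that flatten(row) produces for the data lines. A standalone demo, assuming the flatten sketch above:

import itertools

names = ["cf", "df", "idf", "tf", "tf*idf"]
print(flatten(zip(names, itertools.repeat("term"))))
# -> ['cf', 'term', 'df', 'term', 'idf', 'term', 'tf', 'term', 'tf*idf', 'term']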
Example 3: group_by_sender
import collections

def group_by_sender(messages):
"""[Email] -> {str: [str]} : Associate lowercased email sender with a list of words."""
wordssd = collections.defaultdict(list)
for m in messages:
words = util.flatten(map(str.split, m.lines))
wordssd[m.sender.lower()].append(words)
return {sender: util.flatten(wordss) for sender, wordss in wordssd.items()}
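A hypothetical usage sketch (assuming util.flatten behaves like the deep sketch above); Email here is a stand-in for whatever message type the project actually uses:

import collections

Email = collections.namedtuple("Email", ["sender", "lines"])
msgs = [Email("Ada@example.com", ["hello there", "general"]),
        Email("ada@example.com", ["kenobi"])]
print(group_by_sender(msgs))
# -> {'ada@example.com': ['hello', 'there', 'general', 'kenobi']}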
Example 4: getPlayerId
def getPlayerId(self, *args):
"""Get the id of the current player"""
a = tuple(flatten(args))
if self.playerId is not None and len(a) == 0:
return self.playerId
else:
        return int(self.conn.sendReceive_flat("world.getPlayerId", a))
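Flattening *args lets callers pass ids either loose or wrapped in a list and get the same normalized tuple back. A quick sketch using the flatten above:

print(tuple(flatten((1, 2, 3))))     # as in getPlayerId(1, 2, 3)   -> (1, 2, 3)
print(tuple(flatten(([1, 2, 3],))))  # as in getPlayerId([1, 2, 3]) -> (1, 2, 3)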
Example 5: test_flatten
def test_flatten(self):
def assertEqualListOrTuple(actual, expected):
assert isinstance(expected, (list, tuple,)), "Test logic error"
self.assertIsInstance(actual, (list, tuple,))
self.assertSequenceEqual(actual, expected)
assertEqualListOrTuple(util.flatten([]), [])
assertEqualListOrTuple(util.flatten([[([])]]), [])
    assertEqualListOrTuple(util.flatten([[1, 2], 3, [[4]], [(5, [6, 7], 8)]]), [1, 2, 3, 4, 5, 6, 7, 8])
Example 6: getFormulaIdsFromPars
import re

def getFormulaIdsFromPars(pars, onlyTheorems):
    if onlyTheorems:
        thmPars = map(lambda x: x[1], filter(lambda par: re.search(r"thm", par[0]), pars.items()))
    else:
        thmPars = map(lambda x: x[1], pars.items())
    formulaTokens = filter(lambda token: token[:5] == "<fid ", flatten(flatten(thmPars)))
return map(lambda token: token[5:-1], formulaTokens)
Author: Zwackelmann, Project: zb_math_cluster_experiments, Lines of code: 10, Source: tokenize_paragraphs_with_formulas.py
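The double call flatten(flatten(thmPars)) suggests that this project's flatten peels only one level of nesting per call rather than recursing all the way down (an inference from usage, not confirmed). With a one-level flatten, two applications expose tokens nested two levels deep:

flatten1 = lambda xs: [x for sub in xs for x in sub]  # hypothetical one-level variant

pars = [[["<fid 1>", "word"], ["<fid 2>"]]]  # paragraphs -> sentences -> tokens
print(flatten1(flatten1(pars)))  # -> ['<fid 1>', 'word', '<fid 2>']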
Example 7: Intersections
import time

def Intersections(pts, console):
'''Returns a dictionary of Intersections with Connections, with strings as keys.'''
intind = OriginalIntersections(pts)
net = IntersectionsJoin(intind)
trimmed = IntersectionsTrim(net)
rejoined = IntersectionsJoin(trimmed)
intersections = IntersectionsBuild(rejoined, pts)
t0 = time.time()
    # list of intersections within reasonable distance of the start/end
    intsInRange = list(set(util.flatten(map(lambda a: a[0], OptimalDistance(intersections)))))
ultimate_trim = sorted(util.flatten(map(lambda a: intersections[a].references[0].references, intsInRange)))
console.add('Intersections', error=': '+str(time.time()-t0))
return IntersectionsBuild(IntersectionsJoin(ultimate_trim), pts)
Example 8: smooth_hscroll
import time

def smooth_hscroll(string, row, iterations, delay=0.2, font=default_FONT):
    """Scroll the string at the given row."""
    # one glyph per character plus a one-column gap (0x00)
    cols = list(flatten(map(lambda c: font[c] + [0x00], string)))
    for i in range(iterations):
        position(0, row)
        data(cols[i:i+84])  # 84-column window (presumably the display width)
time.sleep(delay)
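Each character contributes its font columns plus a one-column gap, and flattening yields the raw column stream the display consumes. A sketch with a made-up three-column font, using the flatten above:

font = {"H": [0x7F, 0x08, 0x7F], "i": [0x00, 0x7D, 0x00]}  # hypothetical glyphs
print(list(flatten(map(lambda c: font[c] + [0x00], "Hi"))))
# -> [127, 8, 127, 0, 0, 125, 0, 0]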
Example 9: choose_multi_label
def choose_multi_label(labels, lang_model):
longest = util.argmax(labels, scorer=lambda ngram: len(ngram))
if len(longest) > 3:
best = util.argmax(bigrams.trigrams(longest), lambda ng: lang_model.lidstone(ng))
best = (best,)
elif len(longest) == 3:
best = longest
best = (best,)
elif len(longest) <= 2:
        # fallback: a fairly crude candidate set; ideally we'd enumerate all possible skip n-grams (O(N^2) of them?)
z = [(tuple(x),) for x in labels] + bigrams.bigrams(labels) + bigrams.trigrams(labels)
assert z
z = [x for x in z if len(util.flatten(x)) <= 3]
        # summing the scores behaves oddly here, so instead use a
        # lexicographic ordering of the top-ranked sublabels in the multilabel
def scorer(ngrams):
scores = [lang_model.lidstone(ng) for ng in ngrams]
if len(scores) < 3:
scores += [0]*(3 - len(scores))
scores.sort(reverse=True)
# print "SCORE %-30s %s" % (scores, ngrams)
return scores
        z.sort(key=scorer, reverse=True)
# print "RANKING",z
best = z[0]
else:
assert False
return best
Example 10: make
import multiprocessing

import numpy as np

# ERF, util, and the event counters below are project-level names.
def make(filenames, nprocs, cut):
    '''Create a time residual PDF for a set of data files.
    Note: you may wish to use fewer nprocs than you have CPUs;
    this function will almost certainly be I/O-bound.
    :param filenames: list of RAT ROOT files containing data
    :param nprocs: number of parallel jobs to run
    :param cut: a Cut instance with cuts to apply to the data
    '''
p = multiprocessing.Pool(nprocs)
erf = ERF(cut=cut)
res = np.array(list(util.flatten(p.map(erf, filenames))))
    print()
    print(len(res), 'entries')
    h, e = np.histogram(res, bins=750, range=(cut.t[0], cut.t[1]), density=True)
    pdf = np.array(list(zip(e, h)))  # pairs each bin edge with its bin value; the extra final edge is dropped
    print('total events:', total_events.value)
    print('events reconstructed:', events_reconstructed.value)
    print('events passing cuts:', events_passing_cuts.value)
with open('event_counts.txt', 'a') as f:
        f.write('%s %s %s %i %i %i %i\n' % (str(cut.e), str(cut.r), str(cut.t),
len(res),
total_events.value,
events_reconstructed.value,
events_passing_cuts.value))
return pdf
Example 11: __init__
import numpy

def __init__(self, past, future, features=None):
"""Create a training pattern.
Parameters:
past -- past feature vectors as a tensor of shape [P, V]
where P is past days and V is the vectors/day
future -- future feature vectors as a tensor of [F, V]
where F is future days and V is the vectors/day
features -- a sequence of feature names to use
where None means use all features
"""
# calculate training input from past features
past_subfeatures = [[self._subfeatures(vector, features)
for vector in vectors]
for vectors in past]
self._input = numpy.array(
[list(util.flatten(vectors)) for vectors in past_subfeatures])
# calculate training output from future volatility
future_returns = numpy.log1p(
[[vector.ret for vector in vectors] for vectors in future])
    self._output = (numpy.std(future_returns, axis=0, ddof=1)
                    * numpy.sqrt(252))  # annualize daily volatility (252 trading days)
# calculate past returns for forecasts
self._past_returns = numpy.log1p(
[[vector.ret for vector in vectors] for vectors in past])
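The nested comprehension above turns each past day's feature vectors into one flat input row. A toy sketch of that step, with made-up two-field vectors and the flatten sketch from the top of the page:

past_subfeatures = [[(0.1, 0.2), (0.3, 0.4)],  # day 1: two feature vectors
                    [(0.5, 0.6), (0.7, 0.8)]]  # day 2
print([list(flatten(vectors)) for vectors in past_subfeatures])
# -> [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8]]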
Example 12: finalize
def finalize(self):
merged_clusters = []
for c1 in self.clusters.values():
existing = None
for m in c1:
for c2 in merged_clusters:
if m in c2:
existing = c2
break
if existing is not None:
break
if existing is not None:
print("Merging clusters (shouldn't happen very often.)")
existing.update(c1)
else:
merged_clusters.append(set(c1))
merged_clusters = [list(c) for c in merged_clusters]
all_mentions = util.flatten(merged_clusters)
assert len(all_mentions) == len(set(all_mentions))
return {
"doc_key": self.doc_key,
"sentences": self.sentences,
"speakers": self.speakers,
"clusters": merged_clusters
}
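The assertion flattens the merged clusters to check that no mention landed in two clusters. Note that flatten here must peel exactly one level, since the mentions themselves are spans that have to survive intact; a toy illustration with hypothetical (start, end) spans:

flatten1 = lambda xs: [x for sub in xs for x in sub]  # hypothetical one-level variant

clusters = [[(0, 1), (4, 5)], [(7, 9)]]
mentions = flatten1(clusters)
assert len(mentions) == len(set(mentions))  # each span belongs to exactly one cluster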
Example 13: min_value
from operator import itemgetter

def min_value(self):
if self.min_scale_value:
return self.min_scale_value
data = map(itemgetter("data"), self.data)
if self.stacked:
data = self.get_cumulative_data()
return min(flatten(data))
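A toy sketch of that computation with stand-in series data, assuming the flatten sketch above:

from operator import itemgetter

series = [{"data": [3, 1, 4]}, {"data": [1, 5, 9]}]
print(min(flatten(map(itemgetter("data"), series))))  # -> 1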
Example 14: __init__
def __init__(self, fsinput, fsgrammar, table=None):
"""
Initialize and return the object.
@param fsinput: The input feature structure
@type fsinput: C{nltk.featstruct.FeatStruct}
@param fsgrammar: The generation grammar
@type fsgrammar: C{nltk.featstruct.FeatStruct}
@param table: The feature value type table
@type table: C{fstypes.FeatureTypeTable}
"""
import copy
self.fsinput = fsinput
self.fsgrammar = fsgrammar
self.table = table
self.lr = LinkResolver()
self.gpr = GrammarPathResolver(copy.deepcopy(fsgrammar), table)
self.grammar_paths = flatten(self.gpr.resolve(copy.deepcopy(fsgrammar)))
    # if a type table was passed in, assign types to the feature values
if table:
for i, path in enumerate(self.grammar_paths):
path = assign_types(table, path)
self.grammar_paths[i] = path
Example 15: featured_sources_by_category
from collections import defaultdict

def featured_sources_by_category(category=None):
q = Source.query(Source.featured_priority < 1)
if category: q = q.filter(Source.categories == category)
q = q.order(Source.featured_priority)
sources = q.fetch(400)
categories = util.unique_ordered_list(util.flatten(s.categories for s in sources))
if category and category not in categories: categories.append(category)
category_order = {category: i for i, category in enumerate(["Newspapers", "Culture", "Politics", "Tech", "Humor", "Local", "Longform"])}
categories.sort(key=lambda x: category_order.get(x, 99999))
sources_by_category = defaultdict(list)
for source in sources:
        # use a distinct loop variable here so the `category` parameter isn't
        # clobbered before the 60-vs-15 check below
        for cat in source.categories:
            sources_by_category[cat].append(source)
    max_items_per_category = 60 if category else 15
    for cat, items in sources_by_category.items():
        sources_by_category[cat] = items[:max_items_per_category]  # slicing clamps to the list length
category_jsons = []
for category in categories:
category_jsons.append({"id": category, "name": category, "sources": [s.json() for s in sources_by_category[category]]})
return category_jsons
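Note that flatten receives a generator expression here, so whatever implementation the project uses must accept arbitrary iterables. A toy sketch of the category gathering, with SimpleNamespace standing in for the Source model and an order-preserving dedupe standing in for util.unique_ordered_list:

from types import SimpleNamespace

sources = [SimpleNamespace(categories=["Tech", "Politics"]),
           SimpleNamespace(categories=["Tech", "Humor"])]
flat = flatten(s.categories for s in sources)
print(flat)                       # -> ['Tech', 'Politics', 'Tech', 'Humor']
print(list(dict.fromkeys(flat)))  # -> ['Tech', 'Politics', 'Humor']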