本文整理汇总了Python中map函数的典型用法代码示例。如果您正苦于以下问题:Python map函数的具体用法?Python map怎么用?Python map使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了map函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: diff_dicts
def diff_dicts(dict1, dict2):
    """Compares two dicts and returns the difference as a string,
    if there is any.

    Sorts two dicts (including sorting of the lists!!) and then diffs them.
    This will ignore string types ('unicode' vs 'string').

    args:
        dict1: First dict
        dict2: Second dict

    returns:
        A diff string if there's any difference, otherwise None.
    """
    dict1 = order_dict(dict1)
    dict2 = order_dict(dict2)
    if dict1 == dict2:
        return
    lines1 = pprint.pformat(dict1).splitlines()
    lines2 = pprint.pformat(dict2).splitlines()
    # Remove unicode identifiers so u'foo' and 'foo' compare equal.
    # List comprehensions (not map) because difflib.unified_diff needs
    # real sequences; a Python 3 map object is a one-shot iterator.
    lines1 = [line.replace('u\'', '\'') for line in lines1]
    lines2 = [line.replace('u\'', '\'') for line in lines2]
    return '\n'.join(difflib.unified_diff(lines1, lines2, n=2))
示例2: test_format
def test_format(obj, precision=6):
tf = lambda o: test_format(o, precision)
delimit = lambda o: ', '.join(o)
otype = type(obj)
if otype is str:
return "'%s'" % obj
elif otype is float or otype is int:
if otype is int:
obj = float(obj)
fstr = '%%.%df' % precision
return fstr % obj
elif otype is set:
if len(obj) == 0:
return 'set()'
return '{%s}' % delimit(sorted(map(tf, obj)))
elif otype is dict:
return '{%s}' % delimit(sorted(tf(k)+': '+tf(v) for k,v in obj.items()))
elif otype is list:
return '[%s]' % delimit(map(tf, obj))
elif otype is tuple:
return '(%s%s)' % (delimit(map(tf, obj)), ',' if len(obj) is 1 else '')
elif otype.__name__ in ['Vec','Mat']:
entries = tf({x:obj.f[x] for x in obj.f if obj.f[x] != 0})
return '%s(%s, %s)' % (otype.__name__, test_format(obj.D), entries)
else:
return str(obj)
示例3: schema_shell
def schema_shell(self):
    """Performs the 'schema-shell' command.

    Launches the KijiSchema shell as a subprocess, with KIJI_CLASSPATH
    extended by the Express classpath and JAVA_OPTS adjusted for
    KijiSchema 1.1 compatibility.

    returns:
        The shell subprocess's exit code.
    """
    schema_shell_home = self.env.get(SCHEMA_SHELL_HOME)
    assert (schema_shell_home is not None), \
        ("Environment variable undefined: %r" % SCHEMA_SHELL_HOME)
    assert os.path.isdir(schema_shell_home), \
        ("Invalid home directory for KijiSchema shell: %r" % schema_shell_home)
    schema_shell_script = os.path.join(schema_shell_home, "bin", "kiji-schema-shell")
    assert os.path.isfile(schema_shell_script), \
        ("KijiSchema shell not found: %r" % schema_shell_script)

    env = dict(self.env)

    classpath = env.get(KIJI_CLASSPATH, "").split(":") + list(self.express.get_classpath())
    env[KIJI_CLASSPATH] = ":".join(classpath)

    java_opts = env.get("JAVA_OPTS", "")
    # FIXME: I cannot find any trace of the Java system property "express.tmpjars"!
    # java_opts += (" -Dexpress.tmpjars=%s" % ???)

    # Relevant for KijiSchema 1.1 only and will be removed in Express 3.0:
    java_opts += " -Dorg.kiji.schema.impl.AvroCellEncoder.SCHEMA_VALIDATION=DISABLED"
    env["JAVA_OPTS"] = java_opts

    cmd = [schema_shell_script]
    # BUG FIX: the format string contained a stray "\with" (missing the
    # newline escape), which corrupted the debug output.
    logging.debug(
        "Launching kiji-schema shell with:\n%s\nwith KIJI_CLASSPATH:\n%s",
        " \\\n\t".join(map(repr, cmd)),
        "\n".join(map(tab_indent, classpath)))
    logging.debug("Computed KIJI_CLASSPATH:")
    proc = subprocess.Popen(cmd, env=env)
    try:
        return proc.wait()
    # BUG FIX: the exception class was misspelled "SubProcessError", which
    # would raise AttributeError instead of being caught (Python 3.3+ name
    # is subprocess.SubprocessError).
    except subprocess.SubprocessError:
        proc.kill()
示例4: testSequential
def testSequential(self):
    """Three slow tasks on a width-1 queue must run strictly one-by-one."""
    dq = ResizableDispatchQueue(self.slow, 1)
    # BUG FIX (portability): map() is lazy on Python 3, so
    # map(dq.put, range(3)) would enqueue nothing there.  Use an
    # explicit loop for the side effect.
    for task in range(3):
        dq.put(task)
    # This should finish in about 0.6 seconds, with nothing still on
    # the queue because we stop after 0.5 seconds so all three tasks
    # will have been dispatched one after another.
    return self._stopAndTest(0.5, dq, [])
示例5: testNarrowNarrowWiden
def testNarrowNarrowWiden(self):
    """Shrink the queue twice then widen it; only task 10 should remain."""
    dq = ResizableDispatchQueue(self.slow, 3)
    # BUG FIX (portability): map() is lazy on Python 3, so
    # map(dq.put, range(11)) would enqueue nothing there.  Use an
    # explicit loop for the side effect.
    for task in range(11):
        dq.put(task)
    reactor.callLater(0.1, dq.setWidth, 2)
    reactor.callLater(0.3, dq.setWidth, 1)
    reactor.callLater(0.7, dq.setWidth, 3)
    return self._stopAndTest(0.9, dq, [10])
示例6: parsexyz
def parsexyz(filename):
    """Parse atomic numbers and coordinates from an xyz-style output file.

    args:
        filename: Path handed to parseALL for section extraction.

    returns:
        Tuple (Z, coords) where Z is a list of atomic numbers and coords
        is a numpy array of (x, y, z) rows.
    """
    lst = parseALL(filename, " Number Number Type", " ")
    # Materialize as a real list (not a lazy map) because it is sliced
    # repeatedly below; a Python 3 map object supports neither slicing
    # nor re-iteration.
    values = [float(tok) for tok in lst[1:-1]]
    # Group the flat list into consecutive (x, y, z) triples.
    triples = [values[i:i + 3] for i in range(0, len(values), 3)]
    coords = triples[1::2]
    # NOTE(review): every 6th value (offset 1) is taken as an atomic
    # number; the exact record layout depends on parseALL's output —
    # verify against a sample file.
    Z = [int(v) for v in values[1::6]]
    return (Z, np.array(coords))
示例7: trigger_mopage_refresh
def trigger_mopage_refresh(obj, event):
    """Queue a mopage refresh when an event page (or a child of one) is
    published.

    Walks the acquisition chain of *obj* looking for event pages and an
    enabled mopage trigger; if both are found, touches each event page's
    mopage modification date and enqueues a task calling the trigger URL.
    """
    # Real lists (not lazy filter/map objects) so the truthiness test and
    # [0] indexing below work on Python 3 as well; 'if adapted' mirrors
    # the original filter(None, ...) semantics.
    event_pages = [adapted
                   for adapted in (IEventPage(parent, None)
                                   for parent in aq_chain(obj))
                   if adapted]
    if not event_pages:
        # We are not within an event page.
        # We only trigger when publishing an event page
        # or a child of an event page.
        return

    triggers = [adapted
                for adapted in (IPublisherMopageTrigger(parent, None)
                                for parent in aq_chain(obj))
                if adapted]
    if not triggers or not triggers[0].is_enabled():
        return

    for page in event_pages:
        IMopageModificationDate(page).touch()

    from collective.taskqueue import taskqueue
    trigger_url = triggers[0].build_trigger_url()
    callback_path = '/'.join(getSite().getPhysicalPath()
                             + ('taskqueue_events_trigger_mopage_refresh',))
    taskqueue.add(callback_path, params={'target': trigger_url})
示例8: run_clustering_example
def run_clustering_example(run):
    """Repeatedly build, sample and re-infer the clustering model in *run*.

    NOTE(review): Python 2 code (print statements).  Relies on the module
    global current_graph_state plus several hs_* / *_template_params
    helpers defined elsewhere in the project.
    """
    global current_graph_state
    n = 100  # samples drawn per repetition
    accs = []  # per-repetition lists of per-iteration accuracies
    for i in range(100):
        # Fresh graph state; run() builds the model into it and returns
        # a sampler callable.
        current_graph_state = GraphState()
        sampler = run()
        samples = [sampler() for i in range(n)]
        templ = current_graph_state.to_JSON()
        # Random ground-truth parameters for this repetition.
        rand_params = hs_rand_template_params(templ)
        print hs_sample_bayes_net(templ, rand_params)
        varvals = state_to_varvals(hs_sample_bayes_net(templ, rand_params))
        frozen_samples = [freeze_value(samp, varvals) for samp in samples]
        # Each frozen sample appears to be a (latent, observation) pair
        # — confirm against freeze_value's contract.
        true_latents = [x[0] for x in frozen_samples]
        print true_latents
        templ = condition_on_frozen_samples(current_graph_state, samples, [x[1] for x in frozen_samples])
        print 'best score', params_score(templ, rand_params)
        state_params_list = infer_states_and_parameters(templ)
        rand_cs = params_to_cluster_centers(rand_params)
        iter_accs = []
        j = 0
        for (state, params, score) in state_params_list:
            print 'score', score
            cs = params_to_cluster_centers(params)
            # if j > 1:
            #   varvals = state_to_varvals(state)
            #   state_latents = [freeze_value(samp[0], varvals) for samp in samples]
            #   acc = cluster_assignment_accuracy(true_latents, state_latents)
            #   iter_accs.append(acc)
            j += 1
        accs.append(iter_accs)
    # Mean accuracy per inference iteration, averaged over repetitions.
    print map(mean, zip(*accs))
示例9: find_segments
def find_segments(doc, key, use_segment_table=True):
    """Collect segments from *doc* matching an 'ifos:name:version' key.

    Missing key pieces and '*' act as wildcards for name and version.

    args:
        doc: LIGO_LW document to search.
        key: 'ifos[:name[:version]]' selector string.
        use_segment_table: Read the segment table if True, otherwise the
            segment-summary table.

    returns:
        A coalesced segmentlist of the matching segments.
    """
    key_pieces = key.split(':')
    while len(key_pieces) < 3:
        key_pieces.append('*')

    def matches(x):
        return (str(x.ifos) == key_pieces[0]
                and (str(x.name) == key_pieces[1] or key_pieces[1] == '*')
                and (str(x.version) == key_pieces[2] or key_pieces[2] == '*'))

    # Find all segment definers matching the criteria.
    seg_def_table = lsctables.SegmentDefTable.get_table(doc)
    # A set gives O(1) membership tests below and — unlike a Python 3
    # map object — is not exhausted after the first lookup.
    seg_def_ids = set(str(x.segment_def_id) for x in seg_def_table if matches(x))

    # Find all segments belonging to those definers.
    if use_segment_table:
        source_table = lsctables.SegmentTable.get_table(doc)
    else:
        source_table = lsctables.SegmentSumTable.get_table(doc)
    seg_entries = [x for x in source_table
                   if str(x.segment_def_id) in seg_def_ids]

    # Combine into a segmentlist.
    ret = segmentlist([segment(x.start_time, x.end_time) for x in seg_entries])
    ret.coalesce()
    return ret
示例10: step
def step(self):
    """Advance the table one tick and resample the particle filter.

    Updates the last-seen ball position, reweights every particle by its
    likelihood (tempered and normalized), resamples with replacement, and
    replaces rejected slots with fresh particles.

    returns:
        The result of self.table.step() for this tick.
    """
    r = self.table.step(self.tps)
    self.t += self.tps
    # Remember the last time/position the ball was fully visible.
    if not self.table.fullyOcc():
        self.lastseent = self.t
        self.lastseen = [int(c) for c in self.table.ball.getpos()]

    # Update particle weights by likelihood, append the weight of a
    # fresh ("Empty") particle, then temper and normalize.  All lists —
    # a lazy Python 3 map would be exhausted by sum() and silently yield
    # empty weights afterwards.
    weights = [p.weight for p in self.particles]
    ps = self.getPartPs()
    new_weights = [w * p for w, p in zip(weights, ps)]
    new_weights.append(self.newp)
    new_weights = [np.power(w, self.temp) for w in new_weights]
    total = sum(new_weights)
    new_weights = [w / total for w in new_weights]

    # Resample with replacement; "Empty" slots become brand-new particles.
    candidates = copy.copy(self.particles)
    candidates.append("Empty")
    new_parts = selectReplace(candidates, new_weights, len(self.particles))
    rejns = 0.
    for i in range(len(new_parts)):
        if new_parts[i] == "Empty":
            new_parts[i] = Particle(self.table, self.kapv, self.kapb,
                                    self.kapm, self.perr, self.tps,
                                    self.lastseent, self.lastseen)
            rejns += 1.
    # Reset weights so the next tick starts from a uniform prior.
    for p in new_parts:
        p.weight = 1
    self.lastrej = rejns / self.npart
    self.particles = new_parts
    return r
示例11: get_disks
def get_disks(self):
    """Return a summary dict for each disk, with attach data applied.

    returns:
        A list of dicts with keys 'name' (ovf:diskId), 'capacity'
        (whole GiB, floor-divided) and 'attach'.
    """
    to_attach = self.__get_disks_to_attach_names()
    # Materialize as a list: a lazy Python 3 map object stored on
    # self.disks would be exhausted after a single pass.
    self.disks = [self.__add_attach_data_to_disk(disk, to_attach)
                  for disk in self.disks]
    # '//' pins the Python 2 integer-division behavior the original
    # relied on (plain '/' would yield a float on Python 3).
    return [{"name": disk.getAttribute('ovf:diskId'),
             "capacity": int(disk.getAttribute('ovf:capacity')) // (2 ** 30),
             "attach": disk.getAttribute('attach')}
            for disk in self.disks]
示例12: plot_data
def plot_data(self, v):
    """
    Simple plot window that can be updated very fast.
    No grid or resize like plot()

    args:
        v: list of sample rows; each row is (time, chan1, chan2, ...).
    """
    # Lazily create the Tk window and canvas on first use.
    if self.plotwin is None:  # 'is None' rather than '== None'
        self.plotwin = Tk()
        self.plotwin.title('Phoenix plot')
        self.plotwin.protocol("WM_DELETE_WINDOW", self.clean_qplot)
        self.canvas = Canvas(self.plotwin, background='white',
                             width=WIDTH + 20, height=HALF_HEIGHT * 2 + 20)
        self.canvas.pack()
        # Border and horizontal mid-line.
        self.canvas.create_rectangle(10, 10, WIDTH + 10, HALF_HEIGHT * 2 + 10,
                                     outline='#009900')
        self.canvas.create_line([(10, HALF_HEIGHT + 10),
                                 (WIDTH + 10, HALF_HEIGHT + 10)],
                                fill='#00ff00')

    # Clear the previous traces.  BUG FIX (portability): the original
    # used map() for the side effect, which is lazy on Python 3 and
    # would delete nothing there.
    if len(self.plot_trace) != 0:
        for trace in self.plot_trace:
            self.canvas.delete(trace)
        self.plot_trace = []
        self.plotwin.update()

    numchans = len(v[0]) - 1
    npoints = len(v)
    # Scale time to canvas width and amplitude to half the canvas height.
    xscale = WIDTH / v[-1][0]
    yscale = HALF_HEIGHT / YMAX
    for ch in range(numchans):
        points = []
        for i in range(npoints):
            x = 10 + v[i][0] * xscale
            y = (HALF_HEIGHT + 10) - v[i][ch + 1] * yscale
            points.append((x, y))
        line = self.canvas.create_line(points, fill=self.colors[ch])
        self.plot_trace.append(line)
    self.plotwin.update()
示例13: farpairs_ranges
def farpairs_ranges(ratings):
    """Select up to WINDOW rating indices whose uncertainty ranges overlap,
    preferring indices from far-apart pairs, and reorder accordingly.

    args:
        ratings: sequence of entries where entry[1] is the average and
            entry[2] the standard deviation.

    returns:
        Whatever reorder(ratings, subset) returns for the chosen subset.
    """
    pairs = []
    for i in range(len(ratings)):
        for j in range(len(ratings)):
            dist = abs(i - j)
            avg1, std1 = ratings[i][1], ratings[i][2]
            avg2, std2 = ratings[j][1], ratings[j][2]
            # Keep the pair when either rating's upper bound falls inside
            # the other rating's [avg - std, avg + std] band.
            if (avg2 - std2 < avg1 + std1 <= avg2 + std2) or \
               (avg1 - std1 < avg2 + std2 <= avg1 + std1):
                pairs.append([i, j, dist])
    # Sort by distance ascending.  BUG FIX (portability): the original
    # passed a Python-2-only cmp lambda to list.sort(); key= is the
    # supported form and expresses the same ordering.
    pairs.sort(key=lambda p: p[2])
    # NOTE(review): p[:1] keeps only the *first* index of each pair;
    # p[:2] (both indices) may have been intended — preserved as-is.
    pairs = [p[:1] for p in pairs]
    idxs = []
    for p in pairs:
        idxs.extend(p)
    # Pop from the far end (largest distances first) until WINDOW indices.
    subset = set()
    while len(idxs) > 0 and len(subset) < WINDOW:
        subset.add(idxs.pop())
    # Re-express the chosen indices in original rating order.
    subset = [ratings[i] for i in range(len(ratings)) if i in subset]
    return reorder(ratings, subset)
示例14: test_001_diff_phasor_vcc
def test_001_diff_phasor_vcc(self):
    """Check diff_phasor_vcc: each output vector is conj(prev) * current."""
    a = [1+2j,2+3.5j,3.5+4j,4+5j,5+6j]
    b = [1j,1j,1j,1j,1j]
    c = [-1j+3,1j,-7+0j,2.5j+0.333,3.2j]
    d = [(0.35979271051026462+0.89414454782483865j),
         (0.19421665709046287+0.024219594550527801j),
         (0.12445564785882557+0.40766238899138718j),
         (0.041869638845043688+0.97860437393366329j),
         (0.068927762235083234+0.16649764877365247j)]
    e = [(0.16207552830286298+0.435385030608331j),
         (0.47195779613669675+0.37824764113272558j),
         (0.13911998015446148+0.6585095669811617j),
         (0.093510743358783954+0.98446560079828938j),
         (0.86036393297704694+0.72043005342024602j)]
    multconj = lambda x, y: x.conjugate() * y
    src_data = a + b + c + d + e
    # First vector has no predecessor, so it differentiates to zeros.
    # BUG FIX (portability): build explicit lists — on Python 3,
    # map() returns an iterator that cannot be concatenated with '+'.
    expected_result = [0j, 0j, 0j, 0j, 0j]
    for prev, cur in [(a, b), (b, c), (c, d), (d, e)]:
        expected_result += [multconj(x, y) for x, y in zip(prev, cur)]
    src = blocks.vector_source_c(src_data)
    s2v = blocks.stream_to_vector(gr.sizeof_gr_complex, 5)
    diff_phasor_vcc = grdab.diff_phasor_vcc(5)
    v2s = blocks.vector_to_stream(gr.sizeof_gr_complex, 5)
    dst = blocks.vector_sink_c()
    self.tb.connect(src, s2v, diff_phasor_vcc, v2s, dst)
    self.tb.run()
    result_data = dst.data()
    self.assertComplexTuplesAlmostEqual(expected_result, result_data, 6)
示例15: get_budgets
def get_budgets(self):  # {{{
    """Fetch budget utilization for the last twelve months from Mint.

    returns:
        Dict with 'income' and 'spend' lists of budget entries; each
        entry's 'cat' id is resolved to a category object via
        get_category_from_id().
    """
    # Get categories
    categories = self.get_categories()

    # Issue request for budget utilization
    first_of_this_month = date.today().replace(day=1)
    eleven_months_ago = (first_of_this_month - timedelta(days=330)).replace(day=1)
    url = "{}/getBudget.xevent".format(MINT_ROOT_URL)
    params = {
        'startDate': eleven_months_ago.strftime('%m/%d/%Y'),
        'endDate': first_of_this_month.strftime('%m/%d/%Y'),
        'rnd': Mint.get_rnd(),
    }
    response = json.loads(self.get(url, params=params, headers=JSON_HEADER).text)

    def latest_entry(section):
        # Sections are keyed by numeric period ids; take the entry with
        # the highest id (the most recent period).
        data = response['data'][section]
        return data[str(max(map(int, data.keys())))]['bu']

    # Make the skeleton return structure.
    # BUG FIX: the original looked up the 'spending' entry using the max
    # key computed from the 'income' section (copy-paste error); each
    # section now uses its own keys.
    budgets = {
        'income': latest_entry('income'),
        'spend': latest_entry('spending'),
    }

    # Fill in the return structure
    for direction in budgets.keys():
        for budget in budgets[direction]:
            budget['cat'] = self.get_category_from_id(
                budget['cat'],
                categories
            )
    return budgets