本文整理汇总了Python中disco.core.Disco.new_job方法的典型用法代码示例。如果您正苦于以下问题:Python Disco.new_job方法的具体用法?Python Disco.new_job怎么用?Python Disco.new_job使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类disco.core.Disco的用法示例。
在下文中一共展示了Disco.new_job方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: IndexJob
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
class IndexJob(object):
    """Builds a tf-idf inverted index over a docset with a Disco job and
    hands the results to discodex for indexing."""

    def __init__(self, spec, discodex, disco_addr="disco://localhost", profile=False):
        # TODO(sqs): refactoring potential with PagerankJob
        # NOTE(review): disco_addr is accepted but unused; the master is
        # taken from DiscoSettings instead — confirm intent.
        self.spec = spec
        self.discodex = discodex
        self.docset = Docset(spec.docset_name)
        self.disco = Disco(DiscoSettings()["DISCO_MASTER"])
        self.nr_partitions = 8
        self.profile = profile

    def start(self):
        """Run the tf-idf job, then index its results with discodex."""
        results = self.__run_job(self.__index_job())
        self.__run_discodex_index(results)

    def __run_job(self, job):
        # Block until the job finishes; optionally gather profiling data.
        # NOTE(review): __profile_job is defined elsewhere in this class/file.
        results = job.wait()
        if self.profile:
            self.__profile_job(job)
        return results

    def __index_job(self):
        # Submit the tf-idf map/reduce over the docset's DDFS tag.
        return self.disco.new_job(
            name="index_tfidf",
            input=["tag://" + self.docset.ddfs_tag],
            map_reader=docparse,
            map=TfIdf.map,
            reduce=TfIdf.reduce,
            sort=True,
            partitions=self.nr_partitions,
            partition=TfIdf.partition,
            merge_partitions=False,
            profile=self.profile,
            params=dict(doc_count=self.docset.doc_count),
        )

    def __run_discodex_index(self, results):
        # Index the job's output with discodex, wait for the indexing job,
        # then clone the index to its permanent invindex name.
        opts = {
            "parser": "disco.func.chain_reader",
            "demuxer": "freequery.index.tf_idf.TfIdf_demux",
            "nr_ichunks": 1,  # TODO(sqs): after disco#181 fixed, increase this
        }
        ds = DataSet(input=results, options=opts)
        origname = self.discodex.index(ds)
        self.disco.wait(origname)  # origname is also the disco job name
        self.discodex.clone(origname, self.spec.invindex_name)
示例2: LinkParseJob
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
class LinkParseJob(object):
    """Runs a map-only Disco job that extracts links from a docset's dumps
    and tags the resulting link files in DDFS."""

    def __init__(self, spec, verbose=False, **kwargs):
        self.spec = spec
        self.docset = Docset(self.spec.docset_name)
        self.disco = Disco("disco://localhost")
        self.verbose = verbose

    def start(self):
        """Submit the linkparse job, wait for it, and tag its results."""
        from disco import func
        job = self.disco.new_job(
            name="linkparse",
            input=self.docset.dump_uris(),
            map_reader=docparse,
            map=linkparse_map,
            # Custom output stream chain so link files are written in the
            # LinkFileOutputStream format.
            map_output_stream=(func.map_output_stream,
                               func.disco_output_stream,
                               LinkFileOutputStream.disco_output_stream),
            partitions=0,  # map-only job: no reduce phase
            save=True,
        )
        results = job.wait()
        self.__tag_results(results)
        if self.verbose:
            self.__print_results(results)

    def __tag_results(self, results):
        # Re-tag the job's output blobs under the docset's permanent link
        # file tag, then drop the job's temporary tag.
        from disco.ddfs import DDFS
        ddfs = DDFS()
        results_tag = results[0]
        ddfs.put(self.docset.ddfs_link_file_tag, list(ddfs.blobs(results_tag)))
        # remove old, temporary tag
        ddfs.delete(results_tag)

    def __print_results(self, results):
        # Debug helper: dump each parsed doc's URI and outbound links.
        for doc in result_iterator(results, tempdir=False, reader=doclinksparse):
            print "%s\n\t%s" % (doc.uri, "\n\t".join(doc.link_uris))
示例3: reduce
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
title = line[-4]
year = line[-1]
yield year, title
def reduce(iter, params):
    """For each year, emit how many of its titles contain "love"."""
    from disco.util import kvgroup
    for year, titles in kvgroup(sorted(iter)):
        yield year, sum(1 for title in titles if "love" in title.lower())
# Driver: run the song-titles job on the Disco master and render the
# per-year counts as a Google Chart bar-chart URL.
disco = Disco(DiscoSettings()['DISCO_MASTER'])
print "Starting Disco job.."
print "Go to %s to see status of the job." % disco.master
results = disco.new_job(name="song-titles",
                        input=["tag://hackreduce:millionsongs:subset"],
                        map=map,
                        reduce=reduce,
                        save=True).wait()
print "Job done. Results:"
chart_url = "http://chart.apis.google.com/chart?chxr=0,0,15&chxt=y&chbh=a,4,10&chs=738x220&cht=bvs&chco=4D89F9&chds=0,15&chd=t:"
res_list = []
# Print result to user
for year, titles in result_iterator(results):
    res_list.append(str(titles))
chart_url += ",".join(res_list)
chart_url += "&chdl=Songs+with+%22Love%22+in+their+titles&chtt=Most+Romantic+Year+by+Song+Titles"
print chart_url
示例4: fun_map3
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
return []
def fun_map3(e, params):
    # Intentionally broken map: the bare name `fail` raises NameError so
    # the surrounding test can verify the job ends up "dead". Do not fix.
    fail

def fun_map4(e, params):
    # Deliberately slow map so the job is still running while others are
    # polled. `time` is presumably imported at module level — not visible here.
    time.sleep(4)
    return []
# Driver: launch the four map jobs and poll them with Disco.results();
# the fun_map3 job must die, the others must finish "ready".
# NOTE(review): indentation was lost in extraction and is reconstructed
# here — verify loop nesting against the original test.
tserver.run_server(data_gen)
disco = Disco(sys.argv[1])
jobs = []
for i, m in enumerate([fun_map1, fun_map2, fun_map3, fun_map4]):
    jobs.append(disco.new_job(
        name = "test_waitmany_%d" % (i + 1),
        input = tserver.makeurl([""] * 5),
        map = m))
res = []
while jobs:
    cont = False  # NOTE(review): unused — likely leftover
    ready, jobs = disco.results(jobs, timeout = 2000)
    res += ready
for n, r in res:
    if n.startswith("test_waitmany_3"):
        # The deliberately failing job must be reported dead.
        if r[0] != "dead":
            raise Exception("Invalid job status: %s" % n)
    elif r[0] != "ready":
        raise Exception("Invalid job status: %s" % n)
    disco.purge(n)
示例5: fun_map
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
def fun_map(e, params):
    """Append params['suffix'] to the key.

    Tuple inputs are (key, count) pairs produced by a previous chained
    pass: increment the count. Plain string inputs start at count 0.
    """
    # isinstance() instead of the `type(e) == tuple` anti-pattern; same
    # behavior for the tuples this job actually passes around.
    if isinstance(e, tuple):
        return [(e[0] + params['suffix'], int(e[1]) + 1)]
    return [(e + params['suffix'], 0)]
def fun_reduce(iter, out, params):
    """Pass every pair through, appending "-" to the key."""
    for key, value in iter:
        out.add(key + "-", value)
# Driver: chain ten jobs; each pass appends its index digit to every key
# and bumps the count, reading the previous pass via chain_reader.
tserver.run_server(data_gen)
disco = Disco(sys.argv[1])
results = disco.new_job(name = "test_chain_0", input = tserver.makeurl([""] * 100),
                        map = fun_map, reduce = fun_reduce, nr_reduces = 4,
                        sort = False, params = {'suffix': '0'}).wait()
i = 1
while i < 10:
    nresults = disco.new_job(name = "test_chain_%d" % i, input = results,
                             map = fun_map, reduce = fun_reduce, nr_reduces = 4,
                             map_reader = chain_reader, sort = False,
                             params = {'suffix': str(i)}).wait()
    # Purge the previous pass only once the next one has its results.
    disco.purge(jobname(results[0]))
    results = nresults
    i += 1
for key, value in result_iterator(results):
if key[:5] not in ani or key[5:] != "0-1-2-3-4-5-6-7-8-9-":
示例6: range
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
# Driver: generate N*10 "i:j" inputs and run the same mapper twice —
# once with per-input result files (default) and once with a single
# partition file per host (nr_reduces = 1).
tserver.run_server(data_gen)
N = 10
results = {}
inputs = []
for i in range(N):
    a = [i] * 10
    b = range(i, i + 10)  # Python 2: range() returns a list
    inputs += ["%d:%d" % x for x in zip(a, b)]
    results[str(i)] = sum(b)
disco = Disco(sys.argv[1])
# map results in individual files, one per input file (default mode)
job1 = disco.new_job(\
    name = "test_partfile1",
    input = tserver.makeurl(inputs),
    map = fun_map)
# map results in one big partition file per host
job2 = disco.new_job(\
    name = "test_partfile2",
    input = tserver.makeurl(inputs),
    map = fun_map,
    nr_reduces = 1)
check_results(job1)
check_results(job2)
job1.purge()
job2.purge()
print "ok"
示例7: map
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
import sys
from disco.core import Disco, result_iterator
from disco.settings import DiscoSettings
def map(entry, params):
    """Emit (word, 1) for every whitespace-separated word in the line."""
    words = entry.split()
    for w in words:
        yield w, 1
def reduce(iter, out, params):
    """Sum per-word frequencies and emit one (word, total) pair each.

    Uses dict.items() instead of the Python-2-only iteritems(): in
    Python 2 the results are identical, and the function also runs
    under Python 3.
    """
    totals = {}
    for word, freq in iter:
        # freq may arrive as a string from upstream readers.
        totals[word] = totals.get(word, 0) + int(freq)
    for word, freq in totals.items():
        out.add(word, freq)
# Driver: classic wordcount over the Chekhov sample text, printing
# every (word, frequency) pair from the results.
disco = Disco(DiscoSettings()['DISCO_MASTER'])
print "Starting Disco job.."
print "Go to %s to see status of the job." % disco.master
results = disco.new_job(name="wordcount",
                        input=["http://discoproject.org/chekhov.txt"],
                        map=map,
                        reduce=reduce).wait()
print "Job done. Results:"
for word, freq in result_iterator(results):
    print word, freq
示例8: int
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
import time
from mapper import map
from reducer import reduce
name = "gap-%s" % int(time.time())
disco = Disco(DiscoSettings()['DISCO_MASTER'])
print "Starting Disco job (%s).." % name
print "Go to %s to see status of the job." % disco.master
results = disco.new_job(name=name,
input=["tag://gap:1million"],
map_input_stream=(
func.map_input_stream,
func.chain_reader,
),
map=map,
reduce=reduce,
save=True).wait()
print "Job done. Results:"
f = open('data.js', 'w')
for time_of_day, scores in result_iterator(results):
str_time = time_of_day.strftime("%Y-%m-%d %H:%M")
s = json.dumps({
'time': str_time,
'scores': scores
})
f.write(s + ",\n")
示例9: data_gen
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
import tserver, sys, time
from disco.core import Disco
def data_gen(path):
    """Serve the same fixed one-line payload for every requested path."""
    return " ".join(["1", "2", "3"]) + "\n"
def fun_map(e, params):
    # Deliberately sleeps far longer than the test runs so the job is
    # guaranteed to still be active when it gets killed. Do not "fix".
    import time
    time.sleep(100)
    return []
# Driver: start a job with twice as many inputs as worker slots so it
# cannot finish quickly, kill it, and verify it is reported "dead".
disco = Disco(sys.argv[1])
num = sum(x['max_workers'] for x in disco.nodeinfo()['available'])
print >> sys.stderr, num, "slots available"
tserver.run_server(data_gen)
job = disco.new_job(name = "test_kill",
                    input = tserver.makeurl([""] * num * 2), map = fun_map)
# Give workers time to start before killing.
time.sleep(10)
print >> sys.stderr, "Killing", job.name
job.kill()
time.sleep(5)
if job.jobinfo()['active'] == "dead":
    print "ok"
    job.purge()
else:
    raise Exception("Killing failed")
示例10: fun_map
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
return path[1:] + "\n"
def fun_map(e, params):
    """Parse "x|y", apply mod1.plusceil(x, y) and add ceil(1.5)."""
    left, right = e.split("|")
    value = mod1.plusceil(float(left), float(right)) + math.ceil(1.5)
    return [(value, "")]
# Driver: exercise disco's module handling — the same job runs once with
# automatic module detection and once with explicit required_modules.
tserver.run_server(data_gen)
disco = Disco(sys.argv[1])
inputs = ["0.5|1.2"]
print "disco tests.."
# default
job = disco.new_job(
    name = "test_modutil1",
    input = tserver.makeurl(inputs),
    map = fun_map)
checkl("test_modutil1", result_iterator(job.wait()), [("4.0", "")])
job.purge()
print "test_modutil1 ok"
job = disco.new_job(
    name = "test_modutil2",
    input = tserver.makeurl(inputs),
    # explicitly computed module list for the map function
    required_modules = modutil.find_modules([fun_map]),
    map = fun_map)
checkl("test_modutil2", result_iterator(job.wait()), [("4.0", "")])
job.purge()
print "test_modutil2 ok"
job = disco.new_job(
示例11: fun_reduce
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
def fun_reduce(iter, out, params):
    """Emit every pair unchanged except the key is wrapped in brackets."""
    for key, value in iter:
        out.add("[%s]" % key, value)
# Driver: launch five overlapping jobs, each over its own slice of the
# inputs, collect their results asynchronously and verify every key
# appears exactly 10 times.
# NOTE(review): indentation was lost in extraction and is reconstructed
# here — verify loop nesting against the original test.
tserver.run_server(data_gen)
disco = Disco(sys.argv[1])
num = sum(x['max_workers'] for x in disco.nodeinfo()['available'])
print >> sys.stderr, num, "slots available"
inputs = tserver.makeurl(range(num * 10))
random.shuffle(inputs)
jobs = []
for i in range(5):
    jobs.append(disco.new_job(name = "test_async_%d" % i,
                              input = inputs[i * (num * 2):(i + 1) * (num * 2)],
                              map = fun_map, reduce = fun_reduce, nr_reduces = 11,
                              sort = False))
    time.sleep(1)
all = dict(("[%s]" % i, 0) for i in range(num * 10))
while jobs:
    ready, jobs = disco.results(jobs)
    for name, results in ready:
        for k, v in result_iterator(results[1]):
            all[k] += 1
        disco.purge(name)
for v in all.values():
    if v != 10:
        # Fix: string exceptions (`raise "..."`) are a TypeError since
        # Python 2.6 — raise a real Exception instead.
        raise Exception("Invalid results: %s" % all)
示例12: timedelta
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
date_obj = datetime.fromtimestamp(float(time[:-3])) # timestamp has milliseconds, shave em off
nearest_minute = date_obj - timedelta(minutes=date_obj.minute % 1, seconds=date_obj.second, microseconds=date_obj.microsecond)
yield (nearest_minute, {'unique_id': uid, 'query': query, 'frequency': frequency})
def reduce(iter, params):
    """Sum the grouped counts per key.

    NOTE: carried over from an older example; the original author marked
    it as not working at all.
    """
    for uid, counts in kvgroup(sorted(iter)):
        total = sum(counts)
        yield uid, total
# Driver: run the search-history job through chain_reader and print the
# reduced (key, count) pairs.
disco = Disco(DiscoSettings()['DISCO_MASTER'])
print "Starting Disco job.."
print "Go to %s to see status of the job." % disco.master
"""
:clicks (ad id,people who clicked the ads)
"""
results = disco.new_job(name="bartekc",
                        input=["tag://hackreduce:search:history"],
                        map_input_stream=(
                            func.map_input_stream,
                            func.chain_reader,
                        ),
                        map=map,
                        reduce=reduce,
                        save=True).wait()
print "Job done. Results:"
for word, count in result_iterator(results):
    print word, count
示例13: data_gen
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
from disco.core import Disco, result_iterator
def data_gen(path):
    """Echo the request path without its leading character, plus newline."""
    return "%s\n" % path[1:]
def fun_map(e, params):
    """Square the ceiling of the numeric input and base64-encode it."""
    squared = int(math.ceil(float(e))) ** 2
    return [(base64.encodestring(str(squared)), "")]
# Driver: run the mapper, then recompute the expected base64 keys
# locally and tick each returned key off the expected list.
tserver.run_server(data_gen)
disco = Disco(sys.argv[1])
inputs = [1, 485, 3245]
job = disco.new_job(name = "test_reqmodules",
                    nr_reduces = 1,
                    input = tserver.makeurl(inputs),
                    map = fun_map,
                    sort = False)
res = list(result_iterator(job.wait()))
if len(res) != len(inputs):
    raise Exception("Too few results: Got: %d Should be %d" %
                    (len(res), len(inputs)))
# Python 2: map() returns a list here, so cor.remove() works.
cor = map(lambda x: base64.encodestring(str(int(math.ceil(x)) ** 2)), inputs)
for k, v in res:
    if k not in cor:
        raise Exception("Invalid answer: %s" % k)
    cor.remove(k)
示例14: range
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
# Driver: build shuffled "i:j" inputs and run the mapper as two separate
# jobs, one over each half. (N and results are defined above this chunk.)
inputs = []
for i in range(N):
    a = [i] * 10
    b = range(i, i + 10)  # Python 2: range() returns a list
    inputs += ["%d:%d" % x for x in zip(a, b)]
    results[str(i)] = str(sum(b))
random.shuffle(inputs)
disco = Disco(sys.argv[1])
print "Running two map jobs.."
# NOTE(review): len(inputs) / 2 relies on Python 2 integer division.
map1 = disco.new_job(\
    name = "test_onlyreduce1",
    input = tserver.makeurl(inputs[:len(inputs) / 2]),
    map = fun_map,
    partition = fun_partition,
    nr_reduces = N)
map2 = disco.new_job(\
    name = "test_onlyreduce2",
    input = tserver.makeurl(inputs[len(inputs) / 2:]),
    map = fun_map,
    partition = fun_partition,
    nr_reduces = N)
results1 = map1.wait()
print "map1 done"
results2 = map2.wait()
print "map2 done"
示例15: minFrom
# 需要导入模块: from disco.core import Disco [as 别名]
# 或者: from disco.core.Disco import new_job [as 别名]
newdistances = {}
def minFrom(d, a):
for k, v in a.items():
d[k] = mymin(d.get(k, -1), v)
for d in distances:
if d.get("nodes"):
nodes = d["nodes"]
minFrom(newdistances, d["distances"])
yield node, json.dumps([node,newdistances,nodes])
# Driver: run the shortest-path job over a local input file and write
# each node's JSON payload to out.txt, one line per node.
disco = Disco(DiscoSettings()['DISCO_MASTER'])
print "Starting Disco job.."
print "Go to %s to see status of the job." % disco.master
results = disco.new_job(name="shortestpath",
                        input=["file:///home/marko/tmp/disco/out.txt"],
                        map=map,
                        reduce=reduce,
                        save=True).wait()
print "Job done"
# Python 2 file() builtin; handle is closed explicitly below.
out = file("out.txt", "w")
for node, data in result_iterator(results):
    print >>out, data
out.close()