This article collects typical usage examples of the Python function util.chunks. If you have been wondering what the chunks function does, how to call it, or where to find working examples, the curated code samples below may help.
The 15 code examples of the chunks function shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system surface better Python code examples.
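Note that the implementation of util.chunks itself is not reproduced on this page, and it is not identical across the sampled projects: most versions take a sequence and a chunk size, Example 10 calls chunks with a single argument, and Example 9 passes an include_values keyword. As a rough mental model only, here is a minimal sketch of a fixed-size splitter; it is an assumption for illustration, not the actual helper from any of these projects:

def chunks(seq, size):
    # Sketch only (assumed behaviour): split seq into consecutive pieces
    # of at most `size` elements each.
    return [seq[i:i + size] for i in range(0, len(seq), size)]

Under that sketch, chunks([1, 2, 3, 4, 5], 2) returns [[1, 2], [3, 4], [5]]; the real helpers below may instead yield a generator or, as in Example 9, index ranges over runs of equal values.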
Example 1: generate
def generate(config, dnat=False, test=True):
    public_ip = config["public_ip"]
    current_ip = config["base_ip"]
    dnsmasq_content = ""

    for group in config["groups"].values():
        if not dnat:
            c = chunks([proxy["domain"] for proxy in group["proxies"]], 5)
        else:
            c = chunks([proxy["domain"] for proxy in group["proxies"] if proxy["dnat"]], 5)
        for chunk in c:
            if not dnat:
                dnsmasq_content += generate_dns(chunk, public_ip)
            else:
                dnsmasq_content += generate_dns(chunk, current_ip)

    if test:
        if not dnat:
            dnsmasq_content += generate_dns('ptest.verdandi.is', public_ip)
            dnsmasq_content += generate_dns('ptest2.verdandi.is', public_ip)
        else:
            dnsmasq_content += generate_dns('ptest.verdandi.is', current_ip)
            dnsmasq_content += generate_dns('ptest2.verdandi.is', current_ip)

    if dnat:
        for group in config["groups"].values():
            for proxy in group["proxies"]:
                if not proxy["dnat"]:
                    current_ip = long2ip(ip2long(current_ip) + 1)
                    dnsmasq_content += generate_dns(proxy["domain"], current_ip)

    return dnsmasq_content
Example 2: train
def train(self, X_train, X_val):
    train_true = filter(lambda x: x[2] == 1, X_train)
    train_false = filter(lambda x: x[2] == 0, X_train)
    val_true = filter(lambda x: x[2] == 1, X_val)
    val_false = filter(lambda x: x[2] == 0, X_val)

    n_train_true = len(train_true)
    n_val_true = len(val_true)

    make_epoch_helper = functools.partial(make_epoch, train_true=train_true, train_false=train_false, val_true=val_true, val_false=val_false)

    logging.info("Starting training...")
    epoch_iterator = ParallelBatchIterator(make_epoch_helper, range(P.N_EPOCHS), ordered=False, batch_size=1, multiprocess=False, n_producers=1)

    for epoch_values in epoch_iterator:
        self.pre_epoch()

        train_epoch_data, val_epoch_data = epoch_values
        train_epoch_data = util.chunks(train_epoch_data, P.BATCH_SIZE_TRAIN)
        val_epoch_data = util.chunks(val_epoch_data, P.BATCH_SIZE_VALIDATION)

        self.do_batches(self.train_fn, train_epoch_data, self.train_metrics)
        self.do_batches(self.val_fn, val_epoch_data, self.val_metrics)

        self.post_epoch()

        logging.info("Setting learning rate to {}".format(P.LEARNING_RATE * ((0.985) ** self.epoch)))
        self.l_r.set_value(P.LEARNING_RATE * ((0.985) ** self.epoch))
Example 3: collect_tweets_by_ids
def collect_tweets_by_ids(tweet_ids_config_filepath, output_folder, config):
    apikeys = list(config['apikeys'].values()).pop()

    tweet_ids_config = {}
    with open(os.path.abspath(tweet_ids_config_filepath), 'r') as tweet_ids_config_rf:
        tweet_ids_config = json.load(tweet_ids_config_rf)

    max_range = 100
    current_ix = tweet_ids_config['current_ix'] if ('current_ix' in tweet_ids_config) else 0
    total = len(tweet_ids_config['tweet_ids'][current_ix:])
    tweet_id_chuncks = util.chunks(tweet_ids_config['tweet_ids'][current_ix:], max_range)

    for tweet_ids in tweet_id_chuncks:
        try:
            twitterCralwer = TwitterCrawler(apikeys=apikeys, client_args=CLIENT_ARGS, output_folder=output_folder)
            twitterCralwer.lookup_tweets_by_ids(tweet_ids)
            current_ix += len(tweet_ids)
        except Exception as exc:
            logger.error(exc)
            logger.error(util.full_stack())
            # Ctrl+C is deliberately not handled; on restart the crawl resumes from the
            # last saved chunk, so a few duplicate tweets may be collected.
            pass

        tweet_ids_config['current_ix'] = current_ix
        flash_cmd_config(tweet_ids_config, tweet_ids_config_filepath, output_folder)

        logger.info('COMPLETED -> (current_ix: [%d/%d])' % (current_ix, total))
        logger.info('PAUSE %ds to CONTINUE...' % WAIT_TIME)
        time.sleep(WAIT_TIME)
    else:
        logger.info('[tweets_by_ids] ALL COMPLETED')
Example 4: decode
def decode(self, server, block_header, target, job_id=None, extranonce2=None):
    if block_header:
        job = Object()

        binary_data = block_header.decode('hex')
        data0 = np.zeros(64, np.uint32)
        data0 = np.insert(data0, [0] * 16, unpack('IIIIIIIIIIIIIIII', binary_data[:64]))

        job.target = np.array(unpack('IIIIIIII', target.decode('hex')), dtype=np.uint32)
        job.header = binary_data[:68]
        job.merkle_end = np.uint32(unpack('I', binary_data[64:68])[0])
        job.time = np.uint32(unpack('I', binary_data[68:72])[0])
        job.difficulty = np.uint32(unpack('I', binary_data[72:76])[0])
        job.state = sha256(STATE, data0)
        job.f = np.zeros(8, np.uint32)
        job.state2 = partial(job.state, job.merkle_end, job.time, job.difficulty, job.f)
        job.targetQ = 2**256 / int(''.join(list(chunks(target, 2))[::-1]), 16)
        job.job_id = job_id
        job.extranonce2 = extranonce2
        job.server = server

        calculateF(job.state, job.merkle_end, job.time, job.difficulty, job.f, job.state2)

        if job.difficulty != self.difficulty:
            self.set_difficulty(job.difficulty)

        return job
Example 5: decode
def decode(self, server, block_header, target, job_id=None, extranonce2=None):
    if block_header:
        job = Object()

        binary_data = block_header.decode('hex')
        #data0 = list(unpack('<16I', binary_data[:64])) + ([0] * 48)

        job.headerX = binary_data[:76]
        job.dataX = unpack('<19I', job.headerX)
        job.target = unpack('<8I', target.decode('hex'))
        job.header = binary_data[:68]
        job.merkle_end = uint32(unpack('<I', binary_data[64:68])[0])
        job.time = uint32(unpack('<I', binary_data[68:72])[0])
        job.difficulty = uint32(unpack('<I', binary_data[72:76])[0])
        #job.state = sha256(STATE, data0)
        job.targetQ = 2**256 / int(''.join(list(chunks(target, 2))[::-1]), 16)
        job.job_id = job_id
        job.extranonce2 = extranonce2
        job.server = server

        if job.difficulty != self.difficulty:
            self.set_difficulty(job.difficulty)

        return job
Example 6: getstatusforfids
def getstatusforfids(self, fids):
    status = {}
    for chunk in chunks(fids, 50):
        for f in arlalow.fetchbulkstatus(self.fsconn, chunk):
            status[f["fid"]] = f["status"]
    return status
Example 7: extract_all_labels
def extract_all_labels(filenames, out_filepath=DATA_FOLDER+'labels.p', chunk_size=2000):
    print "EXTRACTING ALL LABELS INTO {0}".format(out_filepath)
    all_labels = []
    label_dict = {}

    filenames_chunks = util.chunks(filenames, chunk_size)
    for i, chunk in enumerate(filenames_chunks):
        pool = Pool(processes=util.CPU_COUNT)
        chunk_labels = pool.map(extract_labels, chunk)
        pool.close()

        for filepath, labels in zip(chunk, chunk_labels):
            if labels is not None:
                file_id = util.filename_without_extension(filepath)
                label_dict[file_id] = labels
                all_labels += labels

        print i+1, '/', len(filenames_chunks)

    # Write labels to file
    with open(out_filepath, 'w') as f:
        pickle.dump(label_dict, f)

    print '\nLabels:'
    print len(set(all_labels))
    print Counter(all_labels)
Example 8: __call__
def __call__(self, message, state=None, *, pad=True):
    state = state or self.initial_state
    prepared_message = message + (self.padding(len(message)) if pad else b"")
    assert len(prepared_message) % self.block_size == 0
    for block in chunks(prepared_message, self.block_size):
        state = self.compress(state, block)
    return state
Example 9: predict
def predict(self, data, modes):
    """Predict whether a list of positions follows a train route by detecting
    the nearest train stops. Input is the pandas data frame of
    measurements and an array of current mode predictions. Returns
    an array of predicted modes with as many entries as the input data
    frame has rows.
    """
    # extract lat/lon from data frame
    lat = data['WLATITUDE'].values
    lon = data['WLONGITUDE'].values

    # chunk is a tuple (start_idx, end_idx, mode)
    for start_idx, end_idx, _ in ifilter(lambda chunk: chunk[2] in [MODE_CAR, MODE_BUS, MODE_TRAIN],
                                         chunks(modes, include_values=True)):
        # test for distance first
        lat_seg = lat[start_idx:end_idx]
        lon_seg = lon[start_idx:end_idx]

        valid_lat_seg = lat_seg[np.where(np.invert(np.isnan(lat_seg)))[0]]
        valid_lon_seg = lon_seg[np.where(np.invert(np.isnan(lon_seg)))[0]]
        if len(valid_lon_seg) == 0:
            continue

        # TODO: parameters have to be tuned carefully
        is_train = predict_mode_by_location(valid_lat_seg,
                                            valid_lon_seg,
                                            self.train_location_tree,
                                            self.train_location_dict,
                                            self.train_route_dict,
                                            dist_thre=400,
                                            dist_pass_thres=7,
                                            num_stops_thre=3,
                                            dist_pass_thres_perc=0.7)

        # check entry point distance
        entry_pt_near = -1
        exit_pt_near = -1
        if start_idx - 1 >= 0:
            if not np.isnan(lat[start_idx-1]):
                nearest_station = find_nearest_station(lat[start_idx-1], lon[start_idx-1],
                                                       self.train_location_tree,
                                                       self.dist_thres_entry_exit)
                if len(nearest_station) != 0:
                    entry_pt_near = 1
                else:
                    entry_pt_near = 0

        # check exit point distance
        if end_idx < len(modes):
            if not np.isnan(lat[end_idx]):
                nearest_station = find_nearest_station(lat[end_idx], lon[end_idx],
                                                       self.train_location_tree,
                                                       self.dist_thres_entry_exit)
                if len(nearest_station) != 0:
                    exit_pt_near = 1
                else:
                    exit_pt_near = 0

        if is_train or entry_pt_near + exit_pt_near == 2:
            modes[start_idx:end_idx] = MODE_TRAIN
        else:
            modes[start_idx:end_idx] = MODE_CAR

    return modes
Example 10: crack_ecb_oracle
def crack_ecb_oracle(oracle_fn, prefix_length=0):
    block_size = guess_block_size(oracle_fn)
    if not looks_like_ecb(oracle_fn(b"A" * 100), block_size):
        raise ValueError("oracle_fn does not appear to produce ECB mode output")

    result = bytearray()
    while True:
        short_block_length = (block_size - len(result) - 1 - prefix_length) % block_size
        short_input_block = b"A" * short_block_length
        block_index = (len(result) + prefix_length) // block_size
        block_to_look_for = chunks(oracle_fn(short_input_block))[block_index]
        for guess in all_bytes_by_frequency:
            test_input = short_input_block + result + bytes([guess])
            if chunks(oracle_fn(test_input))[block_index] == block_to_look_for:
                result.append(guess)
                break
        else:  # no byte matched
            return pkcs7_unpad(result)
Example 11: cluster_to_kml
def cluster_to_kml(user, cluster, cluster_id):
    """
    Creates one or more KML files for a given cluster.
    MyMaps limits a KML file to 10 layers, so at most 10 sections
    are written to any single KML file.
    It is the caller's responsibility to check the existence and formatting of the cluster.
    """
    Sections = get_section_db()
    for i, chunk in enumerate(chunks(cluster, 10)):
        sections = map(lambda section_id: Sections.find_one({'_id': section_id}), chunk)
        sections_to_kml("%s_cluster_data_kml/CLUSTER_%s_%i" % (user, str(cluster_id), i), sections)
Example 12: add_text
def add_text(self, text):
    if len(text) + len(self._lines[self.point[0]]) > self.draw_width:
        self.point_to_next_line()

    if len(text) > self.draw_width:
        lines_to_add = chunks(text, self.draw_width)
        lines_to_advance = len(lines_to_add)
        for line in lines_to_add:
            self._lines.append(line)
        self.adjust_point_by_lines(lines_to_advance)
    else:
        self._lines[self.point[0]] += text
        self.point_to_end_of_line()
Example 13: cross_validation
def cross_validation(self, fold, epoch):
    print 'doing cross validation...'
    splited_data = list(chunks(self.data, fold))
    hyper_test = defaultdict(int)
    for idx, (train, test) in enumerate(splited_data):
        for c in self.C:
            for rho_0 in self.RHO_0:
                weight = self.train(train, rho_0, c, epoch=epoch)
                precision = self.test(test, weight)
                print 'done fold %i' % idx, ' on [rho_0: %s, c: %s]' % (rho_0, c)
                hyper_test[(rho_0, c)] += precision
    return map(lambda (x, y): (x, y / fold), hyper_test.iteritems())
Example 14: start_producers
def start_producers(self, result_queue):
    jobs = Queue()
    n_workers = params.N_PRODUCERS
    batch_count = 0

    # Flag used for keeping values in queue in order
    last_queued_job = Value('i', -1)

    for job_index, batch in enumerate(util.chunks(self.X, self.batch_size)):
        batch_count += 1
        jobs.put((job_index, batch))

    # Define producer (putting items into queue)
    def produce(id):
        while True:
            job_index, task = jobs.get()
            if task is None:
                #print id, " fully done!"
                break
            result = self.gen(task)
            while True:
                # My turn to add the finished job
                if last_queued_job.value == job_index - 1:
                    with last_queued_job.get_lock():
                        result_queue.put(result)
                        last_queued_job.value += 1
                        #print id, " worker PUT", job_index
                    break

    # Start workers
    for i in xrange(n_workers):
        if params.MULTIPROCESS:
            p = Process(target=produce, args=(i,))
        else:
            p = Thread(target=produce, args=(i,))
        p.daemon = True
        p.start()

    # Add poison pills to queue (to signal workers to stop)
    for i in xrange(n_workers):
        jobs.put((-1, None))

    return batch_count, jobs
Example 15: threshold_optimization
def threshold_optimization(p, y):
    print "Optimizing threshold"
    y_images = util.chunks(y, 384*512)

    def dice_objective(threshold):
        p_binary = np.where(p > threshold, 1, 0)
        p_images_binary = util.chunks(p_binary, 384*512)
        mean, std, dices = dice(p_images_binary, y_images)
        return -mean

    x, v, message = scipy.optimize.fmin_l_bfgs_b(dice_objective, 0.5, approx_grad=True, bounds=[(0, 1)], epsilon=1e-03)
    print "Optimized, threshold {0}, objective {1}, termination because {2}".format(x, v, message)
    return x[0]