This article collects and summarizes typical usage examples of the itertools.izip method in Python. If you have been wondering what exactly Python's itertools.izip does and how to use it, the hand-picked code examples below may help. You can also explore further usage examples from the itertools module the method belongs to.
The following shows 15 code examples of the itertools.izip method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
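Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what itertools.izip does on Python 2: it pairs up elements from several iterables lazily, whereas Python 2's built-in zip materialises a full list up front. On Python 3 izip no longer exists; the built-in zip is already lazy.
from itertools import izip   # Python 2 only; removed in Python 3

names = ['a', 'b', 'c']
scores = [1, 2, 3]

pairs = izip(names, scores)              # an iterator; nothing is materialised yet
print(next(pairs))                       # ('a', 1)
print(list(pairs))                       # [('b', 2), ('c', 3)]

# izip stops at the shortest input, just like zip:
print(list(izip('xy', [10, 20, 30])))    # [('x', 10), ('y', 20)]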
Example 1: tag
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def tag(self, data_iter):
"""A tagging function.
Args:
data_iter: An iterator that yields batches of data.
Returns:
A generator that yields the tagging results.
"""
output = []
for data in data_iter:
batch = data_to_ids(data, [self.item2id] + [self.word2id] * self.parameters['word_window_size'])
batch = create_input(batch)
seq_ids, seq_other_ids_list, seq_lengths = batch[0], batch[1: -1], batch[-1]
feed_dict = {self.seq_ids_pl: seq_ids.astype(INT_TYPE),
self.seq_lengths_pl: seq_lengths.astype(INT_TYPE),
self.is_train_pl: False}
for pl, v in zip(self.seq_other_ids_pls, seq_other_ids_list):
feed_dict[pl] = v.astype(INT_TYPE)
scores = self.sess.run(self.scores_op, feed_dict)
stag_ids = self.inference(scores, seq_lengths)
for seq, stag_id, length in izip(data[0], stag_ids, seq_lengths):
output.append((seq, [self.id2tag[t] for t in stag_id[:length]]))
yield zip(*output)
output = []
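A side note on Example 1 (an assumption on my part, not part of the original project): since tag() mixes the built-in zip with izip, a common shim keeps such code importable on Python 3, where izip was removed and the built-in zip is already lazy:
try:
    from itertools import izip
except ImportError:      # Python 3: izip is gone, the built-in zip is lazy
    izip = zip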
Example 2: create_input
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def create_input(batch):
"""
Take the sentence data in a batch, pad each sequence to the maximum
length, and return the input arrays for the training or the evaluation function.
"""
assert len(batch) > 0
lengths = [len(seq) for seq in batch[0]]
max_len = max(2, max(lengths))
ret = []
for d in batch:
dd = []
for seq_id, pos in izip(d, lengths):
assert len(seq_id) == pos
pad = [0] * (max_len - pos)
dd.append(seq_id + pad)
ret.append(np.array(dd))
ret.append(np.array(lengths))
return ret
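To make the padding behaviour of create_input concrete, here is a self-contained sketch of the same izip-based loop on a toy batch of two id sequences (Python 2, with numpy assumed to be installed; the numbers are made up):
from itertools import izip
import numpy as np

batch = [[[4, 7, 2], [9, 1]]]                 # one feature column, two sequences
lengths = [len(seq) for seq in batch[0]]      # [3, 2]
max_len = max(2, max(lengths))

ret = []
for d in batch:
    dd = []
    for seq_id, pos in izip(d, lengths):
        dd.append(seq_id + [0] * (max_len - pos))   # right-pad with zeros
    ret.append(np.array(dd))
ret.append(np.array(lengths))

print(ret[0])   # [[4 7 2]
                #  [9 1 0]]
print(ret[1])   # [3 2]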
Example 3: go
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def go(fhs):
fmt = None
with open(opt_vocab, 'w') as vocab_out:
with open(opt_output, 'w') as vecs_out:
for lines in izip(*fhs):
parts = [line.split() for line in lines]
token = parts[0][0]
if any(part[0] != token for part in parts[1:]):
raise IOError('vector files must be aligned')
print >> vocab_out, token
vec = [sum(float(x) for x in xs) for xs in zip(*parts)[1:]]
if not fmt:
fmt = struct.Struct('%df' % len(vec))
vecs_out.write(fmt.pack(*vec))
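The core of go() is the merge step: izip(*fhs) walks all vector files in lockstep, the token column is checked for alignment, and the float columns are summed element-wise before being packed as binary floats. A minimal, self-contained sketch of that step (Python 2; the in-memory lines below are made-up stand-ins for the open files):
from itertools import izip
import struct

fhs = [iter(['cat 0.5 0.25\n']), iter(['cat 0.5 0.25\n'])]   # stand-ins for open files
for lines in izip(*fhs):                       # one aligned line from every file
    parts = [line.split() for line in lines]
    token = parts[0][0]                        # 'cat'
    vec = [sum(float(x) for x in xs) for xs in zip(*parts)[1:]]
    fmt = struct.Struct('%df' % len(vec))
    packed = fmt.pack(*vec)                    # the binary record written to vecs_out
    print(token, vec)                          # ('cat', [1.0, 0.5])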
Example 4: convert_to_graph_tool
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def convert_to_graph_tool(G):
timer = utils.Timer()
timer.tic()
gtG = gt.Graph(directed=G.is_directed())
gtG.ep['action'] = gtG.new_edge_property('int')
nodes_list = G.nodes()
nodes_array = np.array(nodes_list)
nodes_id = np.zeros((nodes_array.shape[0],), dtype=np.int64)
for i in range(nodes_array.shape[0]):
v = gtG.add_vertex()
nodes_id[i] = int(v)
# d = {key: value for (key, value) in zip(nodes_list, nodes_id)}
d = dict(itertools.izip(nodes_list, nodes_id))
for src, dst, data in G.edges_iter(data=True):
e = gtG.add_edge(d[src], d[dst])
gtG.ep['action'][e] = data['action']
nodes_to_id = d
timer.toc(average=True, log_at=1, log_str='src.graph_utils.convert_to_graph_tool')
return gtG, nodes_array, nodes_to_id
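The one izip-specific line in Example 4 is d = dict(itertools.izip(nodes_list, nodes_id)), which zips the graph's nodes with their newly assigned graph-tool vertex indices into a lookup table. A tiny, self-contained illustration (Python 2; the node keys below are hypothetical):
import itertools

nodes_list = [(0, 0), (0, 1), (1, 0)]   # hypothetical node keys
nodes_id = [0, 1, 2]                    # vertex indices assigned when adding vertices

d = dict(itertools.izip(nodes_list, nodes_id))
print(d[(0, 1)])                        # 1
# On Python 3 the equivalent is simply dict(zip(nodes_list, nodes_id)).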
Example 5: averageSeries
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def averageSeries(requestContext, *seriesLists):
"""
Short Alias: avg()
Takes one metric or a wildcard seriesList.
Draws the average value of all metrics passed at each time.
Example:
.. code-block:: none
&target=averageSeries(company.server.*.threads.busy)
"""
yield defer.succeed(None)
(seriesList, start, end, step) = normalize(seriesLists)
name = "averageSeries(%s)" % formatPathExpressions(seriesList)
values = (safeDiv(safeSum(row), safeLen(row)) for row in izip(*seriesList))
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
returnValue([series])
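Examples 5 through 10 all rely on the same idiom: izip(*seriesList) transposes the series so that each row holds the datapoints of every series at one timestamp, and a safe aggregate (safeSum, safeMax, safeStdDev, ...) is then applied per row. A self-contained sketch of the transposition (Python 2; the numbers are made up, and None marks a missing datapoint as in Graphite):
from itertools import izip

series_a = [1, 2, 3]
series_b = [3, 4, None]
seriesList = [series_a, series_b]

for row in izip(*seriesList):
    print(row)
# (1, 3)
# (2, 4)
# (3, None)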
Example 6: stddevSeries
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def stddevSeries(requestContext, *seriesLists):
"""
Takes one metric or a wildcard seriesList.
Draws the standard deviation of all metrics passed at each time.
Example:
.. code-block:: none
&target=stddevSeries(company.server.*.threads.busy)
"""
yield defer.succeed(None)
(seriesList, start, end, step) = normalize(seriesLists)
name = "stddevSeries(%s)" % formatPathExpressions(seriesList)
values = (safeStdDev(row) for row in izip(*seriesList))
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
returnValue([series])
Example 7: maxSeries
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def maxSeries(requestContext, *seriesLists):
"""
Takes one metric or a wildcard seriesList.
For each datapoint from each metric passed in, pick the maximum value and graph it.
Example:
.. code-block:: none
&target=maxSeries(Server*.connections.total)
"""
yield defer.succeed(None)
(seriesList, start, end, step) = normalize(seriesLists)
name = "maxSeries(%s)" % formatPathExpressions(seriesList)
values = (safeMax(row) for row in izip(*seriesList))
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
returnValue([series])
Example 8: rangeOfSeries
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def rangeOfSeries(requestContext, *seriesLists):
"""
Takes a wildcard seriesList.
Distills down a set of inputs into the range of the series
Example:
.. code-block:: none
&target=rangeOfSeries(Server*.connections.total)
"""
yield defer.succeed(None)
(seriesList, start, end, step) = normalize(seriesLists)
name = "rangeOfSeries(%s)" % formatPathExpressions(seriesList)
values = (safeSubtract(max(row), min(row)) for row in izip(*seriesList))
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
returnValue([series])
Example 9: percentileOfSeries
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def percentileOfSeries(requestContext, seriesList, n, interpolate=False):
"""
percentileOfSeries returns a single series which is composed of the n-percentile
values taken across a wildcard series at each point. Unless `interpolate` is
set to True, percentile values are actual values contained in one of the
supplied series.
"""
yield defer.succeed(None)
if n <= 0:
raise ValueError(
'The requested percent is required to be greater than 0')
name = 'percentileOfSeries(%s,%g)' % (seriesList[0].pathExpression, n)
(start, end, step) = normalize([seriesList])[1:]
values = [_getPercentile(row, n, interpolate) for row in izip(*seriesList)]
resultSeries = TimeSeries(name, start, end, step, values)
resultSeries.pathExpression = name
returnValue([resultSeries])
Example 10: countSeries
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def countSeries(requestContext, *seriesLists):
"""
Draws a horizontal line representing the number of nodes found in the seriesList.
.. code-block:: none
&target=countSeries(carbon.agents.*.*)
"""
yield defer.succeed(None)
(seriesList, start, end, step) = normalize(seriesLists)
name = "countSeries(%s)" % formatPathExpressions(seriesList)
values = (int(len(row)) for row in izip(*seriesList))
series = TimeSeries(name, start, end, step, values)
series.pathExpression = name
returnValue([series])
Example 11: _create_config_parameters
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def _create_config_parameters():
"""Creates a config value pair for parameterized test cases.
Yields:
A list containing the list of configs and their values.
"""
string_config_value = 'config value 1'
integer_config_value = 1
bool_config_value = True
list_config_value = ['email1', 'email2']
config_ids = ['string_config', 'integer_config', 'bool_config', 'list_config']
config_values = [
string_config_value, integer_config_value, bool_config_value,
list_config_value
]
for i in itertools.izip(config_ids, config_values):
yield [i]
Example 12: _calculate_annual_sunlight_exposure
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def _calculate_annual_sunlight_exposure(
values, hoys, threshhold=None, blinds_state_ids=None, occ_schedule=None,
target_hours=None):
threshhold = threshhold or 1000
target_hours = target_hours or 250
schedule = occ_schedule or Schedule.eight_am_to_six_pm()
ase = 0
problematic_hours = []
for h, v in zip(hoys, values):
if h not in schedule:
continue
if v > threshhold:
ase += 1
problematic_hours.append(h)
return ase < target_hours, ase, problematic_hours
Example 13: check_extract_features_returns_correct_shape
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def check_extract_features_returns_correct_shape(
self, batch_size, image_height, image_width, depth_multiplier,
pad_to_multiple, expected_feature_map_shapes, use_explicit_padding=False,
use_keras=False):
def graph_fn(image_tensor):
return self._extract_features(image_tensor,
depth_multiplier,
pad_to_multiple,
use_explicit_padding,
use_keras=use_keras)
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor])
for feature_map, expected_shape in itertools.izip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
Example 14: check_extract_features_returns_correct_shapes_with_dynamic_inputs
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def check_extract_features_returns_correct_shapes_with_dynamic_inputs(
self, batch_size, image_height, image_width, depth_multiplier,
pad_to_multiple, expected_feature_map_shapes, use_explicit_padding=False,
use_keras=False):
def graph_fn(image_height, image_width):
image_tensor = tf.random_uniform([batch_size, image_height, image_width,
3], dtype=tf.float32)
return self._extract_features(image_tensor,
depth_multiplier,
pad_to_multiple,
use_explicit_padding,
use_keras=use_keras)
feature_maps = self.execute_cpu(graph_fn, [
np.array(image_height, dtype=np.int32),
np.array(image_width, dtype=np.int32)
])
for feature_map, expected_shape in itertools.izip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
Example 15: _check_returns_correct_shape
# Required module: import itertools [as alias]
# Or: from itertools import izip [as alias]
def _check_returns_correct_shape(
self, batch_size, image_height, image_width, depth_multiplier,
expected_feature_map_shapes, use_explicit_padding=False, min_depth=None,
layer_names=None):
def graph_fn(image_tensor):
model = self._create_application_with_layer_outputs(
layer_names=layer_names,
batchnorm_training=False, use_explicit_padding=use_explicit_padding,
min_depth=min_depth,
alpha=depth_multiplier)
return model(image_tensor)
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor])
for feature_map, expected_shape in itertools.izip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
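Examples 13 through 15 iterate over feature maps and expected shapes with itertools.izip, which only exists on Python 2. A hedged compatibility sketch (an assumption, not part of the original tests) for code that must also run on Python 3, where the built-in zip is already lazy, is to route through six.moves:
from six.moves import zip as izip   # assumes the six package is available

feature_map_shapes = [(2, 8, 8, 32), (2, 4, 4, 64)]
expected_shapes = [(2, 8, 8, 32), (2, 4, 4, 64)]
for shape, expected in izip(feature_map_shapes, expected_shapes):
    assert tuple(shape) == tuple(expected)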