This page collects typical usage examples of the six.next method in Python. If you are wondering how to use six.next, how it behaves, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples for the six module that this method belongs to.
The following shows 15 code examples of the six.next method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
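Before looking at the examples, a minimal sketch of what six.next does may help: it retrieves the next item from an iterator and works the same way on Python 2 and Python 3 (where the built-in is next()). The small generator below is invented purely for illustration.

import six

def count_up(limit):
    """Tiny illustrative generator."""
    for i in range(limit):
        yield i

it = count_up(3)
print(six.next(it))  # 0
print(six.next(it))  # 1
print(six.next(it))  # 2
# One more six.next(it) would raise StopIteration.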
Example 1: make_input_fn_from_generator
# Required import: import six [as alias]
# Or: from six import next [as alias]
def make_input_fn_from_generator(gen):
    """Use py_func to yield elements from the given generator."""
    first_ex = six.next(gen)
    flattened = tf.contrib.framework.nest.flatten(first_ex)
    types = [t.dtype for t in flattened]
    shapes = [[None] * len(t.shape) for t in flattened]
    first_ex_list = [first_ex]

    def py_func():
        if first_ex_list:
            example = first_ex_list.pop()
        else:
            example = six.next(gen)
        return tf.contrib.framework.nest.flatten(example)

    def input_fn():
        flat_example = tf.py_func(py_func, [], types)
        _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
        example = tf.contrib.framework.nest.pack_sequence_as(first_ex, flat_example)
        return example

    return input_fn
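A hedged usage sketch for the helper above (the generator and feature names are invented, and TensorFlow 1.x with tf.contrib is assumed): the returned input_fn is called inside a TF 1.x input pipeline to obtain one example per call as a dict of tensors.

# Illustrative only: assumes TF 1.x and a generator yielding dicts of numpy arrays.
import numpy as np

def example_gen():
    while True:
        yield {"inputs": np.array([1, 2, 3], dtype=np.int64),
               "targets": np.array([4], dtype=np.int64)}

input_fn = make_input_fn_from_generator(example_gen())
features = input_fn()  # dict of tensors with the same structure as one yielded example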
Example 2: infer
# Required import: import six [as alias]
# Or: from six import next [as alias]
def infer(save_dirname=None):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names, fetch_targets] = (
            fluid.io.load_inference_model(save_dirname, exe))
        test_reader = paddle.batch(paddle.dataset.uci_housing.test(), batch_size=20)
        test_data = six.next(test_reader())
        test_feat = numpy.array(list(map(lambda x: x[0], test_data))).astype("float32")
        test_label = numpy.array(list(map(lambda x: x[1], test_data))).astype("float32")
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: numpy.array(test_feat)},
                          fetch_list=fetch_targets)
        print("infer results: ", results[0])
        print("ground truth: ", test_label)

# Run train and infer.
Example 3: __init__
# Required import: import six [as alias]
# Or: from six import next [as alias]
def __init__(self,
             placeholders,
             ordered_dict_of_arrays,
             batch_size,
             random_start=False,
             seed=None,
             num_epochs=None):
    if len(placeholders) != len(ordered_dict_of_arrays) + 1:
        raise ValueError("Expected {} placeholders; got {}.".format(
            len(ordered_dict_of_arrays), len(placeholders)))
    self._index_placeholder = placeholders[0]
    self._col_placeholders = placeholders[1:]
    self._ordered_dict_of_arrays = ordered_dict_of_arrays
    self._max = len(next(iter(ordered_dict_of_arrays.values())))
    for _, v in ordered_dict_of_arrays.items():
        if len(v) != self._max:
            raise ValueError("Array lengths must match.")
    self._batch_size = batch_size
    self._num_epochs = num_epochs
    self._epoch = 0
    random.seed(seed)
    self._trav = random.randrange(self._max) if random_start else 0
    self._epoch_end = (self._trav - 1) % self._max
Example 4: __call__
# Required import: import six [as alias]
# Or: from six import next [as alias]
def __call__(self):
    if self._num_epochs and self._epoch >= self._num_epochs:
        raise errors.OutOfRangeError(None, None,
                                     "Already emitted %s epochs." % self._epoch)
    list_dict = {}
    list_dict_size = 0
    while list_dict_size < self._batch_size:
        try:
            data_row = next(self._iterator)
        except StopIteration:
            self._epoch += 1
            self._iterator = self._generator_function()
            data_row = next(self._iterator)
        for index, key in enumerate(self._keys):
            if key not in data_row.keys():
                raise KeyError("key mismatch between dicts emitted by GenFun. "
                               "Expected {} keys; got {}".format(
                                   self._keys, data_row.keys()))
            list_dict.setdefault(self._col_placeholders[index],
                                 list()).append(data_row[key])
            list_dict_size += 1
    feed_dict = {key: np.asarray(item) for key, item in list(list_dict.items())}
    return feed_dict
Example 5: next_key
# Required import: import six [as alias]
# Or: from six import next [as alias]
def next_key(self):
    """The key names of the next (in iteration) truncated unrolled examples.

    The format of the key is:

    ```python
    "%05d_of_%05d:%s" % (sequence + 1, sequence_count, original_key)
    ```

    if `sequence + 1 < sequence_count`, otherwise:

    ```python
    "STOP:%s" % original_key
    ```

    where `original_key` is the unique key read in by the prefetcher.

    Returns:
      A string vector of length `batch_size`, the keys.
    """
    return self._state_saver._received_next_key
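As a quick worked illustration of the key format described in the docstring (plain string formatting, no TensorFlow needed): the second of five unrolled chunks of an example keyed "ex0" gets a numbered key, while the final chunk gets the STOP form.

sequence, sequence_count, original_key = 1, 5, "ex0"
if sequence + 1 < sequence_count:
    key = "%05d_of_%05d:%s" % (sequence + 1, sequence_count, original_key)
else:
    key = "STOP:%s" % original_key
print(key)                       # -> 00002_of_00005:ex0
print("STOP:%s" % original_key)  # -> STOP:ex0, the form used for the last chunk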
Example 6: request_cancel_external_workflow_execution
# Required import: import six [as alias]
# Or: from six import next [as alias]
def request_cancel_external_workflow_execution(self, external_workflow_execution):
    """Requests cancellation of another workflow.

    :param external_workflow_execution: details of target workflow to cancel
    :type external_workflow_execution: botoflow.workflow_execution.WorkflowExecution
    :return: cancel Future
    :rtype: awsflow.core.future.Future
    """
    self._decider._decisions.append(RequestCancelExternalWorkflowExecution(
        workflow_id=external_workflow_execution.workflow_id,
        run_id=external_workflow_execution.run_id))

    cancel_future = Future()
    context = AsyncTaskContext(False, get_async_context())
    cancel_future.context = context

    handler = self._handle_external_workflow_event(external_workflow_execution, cancel_future)
    six.next(handler)
    self._open_cancel_requests[external_workflow_execution] = {'handler': handler}
    return cancel_future
Example 7: handle_execute_timer
# Required import: import six [as alias]
# Or: from six import next [as alias]
def handle_execute_timer(self, seconds):
    decision_id = self._decider.get_next_id()
    timer_decision = StartTimer(decision_id, str(int(seconds)))
    self._decider._decisions.append(timer_decision)

    timer_future = Future()

    handler = self._handler_fsm(decision_id, timer_future)
    six.next(handler)  # arm
    self._open_timers[decision_id] = {'future': timer_future, 'handler': handler}

    @coroutine
    def wait_for_timer():
        yield timer_future

    return wait_for_timer()
Example 8: make_input_fn_from_generator
# Required import: import six [as alias]
# Or: from six import next [as alias]
def make_input_fn_from_generator(gen):
    """Use py_func to yield elements from the given generator."""
    first_ex = six.next(gen)
    flattened = contrib.framework().nest.flatten(first_ex)
    types = [t.dtype for t in flattened]
    shapes = [[None] * len(t.shape) for t in flattened]
    first_ex_list = [first_ex]

    def py_func():
        if first_ex_list:
            example = first_ex_list.pop()
        else:
            example = six.next(gen)
        return contrib.framework().nest.flatten(example)

    def input_fn():
        flat_example = tf.py_func(py_func, [], types)
        _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
        example = contrib.framework().nest.pack_sequence_as(first_ex, flat_example)
        return example

    return input_fn
Example 9: mix_generators
# Required import: import six [as alias]
# Or: from six import next [as alias]
def mix_generators(generator_list):
    """Given python generators, generate from one, then from another, etc."""
    i = 0
    l = len(generator_list)
    stopiters_seen = 0
    while stopiters_seen <= l:
        try:
            yield six.next(generator_list[i % l])
            i += 1
            stopiters_seen = 0
        except StopIteration:
            i += 1
            stopiters_seen += 1

# File names and Google drive ids for the training/eval/test Wikipedia data.
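A small self-contained usage sketch of mix_generators (the input generators are invented for illustration): items are drawn round-robin until every generator is exhausted.

a = (x for x in [1, 2, 3])
b = (c for c in "xy")
print(list(mix_generators([a, b])))  # -> [1, 'x', 2, 'y', 3]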
Example 10: __next__
# Required import: import six [as alias]
# Or: from six import next [as alias]
def __next__(self):
    """
    Returns a new minibatch of data with each call.

    Yields:
        tuple: The next minibatch which includes both features and labels.
    """
    if self.index >= self.total_iterations:
        raise StopIteration

    self.index += 1

    total, batch_bufs = self.get_at_most(self.batch_size)
    while total < self.batch_size:
        bsz, next_batch_bufs = self.get_at_most(self.batch_size - total)
        batch_bufs = {k: np.concatenate([batch_bufs[k], next_batch_bufs[k]])
                      for k in batch_bufs}
        total += bsz

    batch_bufs['iteration'] = self.index
    return batch_bufs
Example 11: from_indicators
# Required import: import six [as alias]
# Or: from six import next [as alias]
def from_indicators(MultiClassLabels, indicator, index=None, task_name=None):
    import six
    labels = MultiClassLabels()
    n_samples = len(six.next(six.itervalues(indicator)))
    # if index is None:
    #     index = pd.Series(np.arange(n_samples), name='index')
    indicator_df = pd.DataFrame(indicator, index=index)
    assert np.all(indicator_df.sum(axis=1).values), (
        'states in the same task must be mutually exclusive')
    labels.indicator_df = indicator_df
    labels.class_names = indicator_df.columns.values
    labels.encoded_df = pd.DataFrame(
        indicator_df.values.argmax(axis=1),
        columns=[task_name],
        index=index,
    )
    labels.task_name = task_name
    labels.n_samples = n_samples
    labels.n_classes = len(labels.class_names)
    if labels.n_classes == 1:
        labels.n_classes = 2  # 1 column means binary case
    labels.classes_ = np.arange(labels.n_classes)
    labels.default_class_name = labels.class_names[1]
    return labels
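A hedged usage sketch, assuming a MultiClassLabels class compatible with the attributes assigned above (the indicator dict is invented; each key is a state name and each value is a per-sample 0/1 indicator, mutually exclusive per row):

indicator = {
    'healthy':  [1, 0, 1, 0],
    'diseased': [0, 1, 0, 1],
}
labels = from_indicators(MultiClassLabels, indicator, task_name='diagnosis')
print(labels.n_samples)    # 4
print(labels.class_names)  # the two state names, taken from the DataFrame columns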
Example 12: lstm_model
# Required import: import six [as alias]
# Or: from six import next [as alias]
def lstm_model(seq_len=100, batch_size=None, stateful=True):
    """Language model: predict the next char given the current sequence."""
    source = tf.keras.Input(
        name='seed', shape=(seq_len,), batch_size=batch_size, dtype=tf.int32)

    embedding = tf.keras.layers.Embedding(
        input_dim=256, output_dim=EMBEDDING_DIM)(source)
    lstm_1 = tf.keras.layers.LSTM(
        EMBEDDING_DIM, stateful=stateful, return_sequences=True)(embedding)
    lstm_2 = tf.keras.layers.LSTM(
        EMBEDDING_DIM, stateful=stateful, return_sequences=True)(lstm_1)
    predicted_char = tf.keras.layers.TimeDistributed(
        tf.keras.layers.Dense(256, activation='softmax'))(lstm_2)
    model = tf.keras.Model(
        inputs=[source], outputs=[predicted_char],
    )
    model.compile(
        optimizer=tf.train.RMSPropOptimizer(learning_rate=0.01),
        loss='sparse_categorical_crossentropy',
        metrics=['sparse_categorical_accuracy'])
    return model
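A brief hedged usage note: EMBEDDING_DIM is defined elsewhere in the source file, so the value below is an assumption made only so the sketch runs; TensorFlow 1.x is assumed because of tf.train.RMSPropOptimizer.

EMBEDDING_DIM = 512  # assumed value; the real constant lives elsewhere in the source file
model = lstm_model(seq_len=100, batch_size=128, stateful=False)
model.summary()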
Example 13: test_get_groups
# Required import: import six [as alias]
# Or: from six import next [as alias]
def test_get_groups(self):
    self.prepare_response("GET", "/ws/Group", EXAMPLE_GET_GROUPS)
    it = self.dc.devicecore.get_groups()

    grp = six.next(it)
    self.assertEqual(grp.is_root(), True)
    self.assertEqual(grp.get_id(), "11817")
    self.assertEqual(grp.get_name(), "7603_Digi")
    self.assertEqual(grp.get_description(), "7603_Digi root group")
    self.assertEqual(grp.get_path(), "/7603_Digi/")
    self.assertEqual(grp.get_parent_id(), "1")

    grp = six.next(it)
    self.assertEqual(grp.is_root(), False)
    self.assertEqual(grp.get_id(), "13542")
    self.assertEqual(grp.get_name(), "Demo")
    self.assertEqual(grp.get_description(), "")
    self.assertEqual(grp.get_path(), "/7603_Digi/Demo/")
    self.assertEqual(grp.get_parent_id(), "11817")
Example 14: get
# Required import: import six [as alias]
# Or: from six import next [as alias]
def get(self):
    """Creates a generator to extract data from the queue.

    Skip the data if it is `None`.

    # Yields
        The next element in the queue, i.e. a tuple
        `(inputs, targets)` or
        `(inputs, targets, sample_weights)`.
    """
    try:
        while self.is_running():
            inputs = self.queue.get(block=True).get()
            self.queue.task_done()
            if inputs is not None:
                yield inputs
    except Exception as e:
        self.stop()
        six.reraise(*sys.exc_info())
Example 15: __str__
# Required import: import six [as alias]
# Or: from six import next [as alias]
def __str__(self):
    return self.tvalue

# ========================================================================
#        Class: f_tokens
#  Description: An ordered list of tokens
#
#   Attributes: items - Ordered list
#               index - Current position in the list
#
#      Methods: f_tokens     - __init__()
#               f_token      - add()      - Add a token to the end of the list
#               None         - addRef()   - Add a token to the end of the list
#               None         - reset()    - reset the index to -1
#               Boolean      - BOF()      - Beginning of list?
#               Boolean      - EOF()      - End of list?
#               Boolean      - moveNext() - Move the index along one
#               f_token/None - current()  - Return the current token
#               f_token/None - next()     - Return the next token (leave the index unchanged)
#               f_token/None - previous() - Return the previous token (leave the index unchanged)
# ========================================================================
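The comment block above only documents the f_tokens interface; the implementation itself is not shown on this page. A minimal sketch of what such a token list could look like, based solely on the described methods (this is not the original author's code), might be:

class f_tokens:
    """Ordered list of tokens with a movable cursor (illustrative sketch only)."""

    def __init__(self):
        self.items = []   # ordered list of tokens
        self.index = -1   # current position; -1 means "before the first token"

    def add(self, token):
        """Append a token and return it."""
        self.items.append(token)
        return token

    def addRef(self, token):
        """Append an existing token; returns nothing."""
        self.items.append(token)

    def reset(self):
        """Reset the cursor to -1."""
        self.index = -1

    def BOF(self):
        """At (or before) the beginning of the list?"""
        return self.index <= 0

    def EOF(self):
        """At (or past) the end of the list?"""
        return self.index >= len(self.items) - 1

    def moveNext(self):
        """Advance the cursor by one; return False if already at the end."""
        if self.EOF():
            return False
        self.index += 1
        return True

    def current(self):
        """Return the current token, or None if the cursor has not moved yet."""
        return self.items[self.index] if self.index != -1 else None

    def next(self):
        """Return the following token without moving the cursor, or None at the end."""
        return self.items[self.index + 1] if not self.EOF() else None

    def previous(self):
        """Return the preceding token without moving the cursor, or None at the start."""
        return self.items[self.index - 1] if self.index >= 1 else None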