

Python moves.zip Method Code Examples

This article collects typical usage examples of the Python method six.moves.zip. If you are wondering what six.moves.zip does, how to call it, or what it looks like in real code, the curated examples below may help. You can also browse further usage examples from its containing module, six.moves.


The following presents 15 code examples of the moves.zip method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
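Before diving into the examples, here is a minimal, self-contained sketch of what six.moves.zip gives you: on Python 3 it is simply the built-in zip, and on Python 2 it maps to itertools.izip, so in both cases it returns a lazy iterator rather than a list.

from six.moves import zip  # itertools.izip on Python 2, the builtin zip on Python 3

names = ["a", "b", "c"]
values = [1, 2, 3]

pairs = zip(names, values)         # a lazy iterator on both Python 2 and Python 3
print(list(pairs))                 # [('a', 1), ('b', 2), ('c', 3)]

# Pairing two sequences into a dict, a pattern that recurs in several examples below.
print(dict(zip(names, values)))    # {'a': 1, 'b': 2, 'c': 3}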

Example 1: multi_apply

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def multi_apply(func, *args, **kwargs):
    """Apply function to a list of arguments.

    Note:
        This function applies ``func`` to multiple inputs and maps
        the multiple outputs of ``func`` into separate lists. Each
        list contains the same kind of output, one entry per input.

    Args:
        func (Function): A function that will be applied to a list of
            arguments

    Returns:
        tuple(list): A tuple of lists, where each list contains one kind
            of result returned by the function.
    """
    pfunc = partial(func, **kwargs) if kwargs else func
    map_results = map(pfunc, *args)
    return tuple(map(list, zip(*map_results))) 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 22, Source file: misc.py
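A minimal usage sketch, assuming the multi_apply definition above together with `from functools import partial` (the helper add_and_mul and the input lists are hypothetical):

from functools import partial  # needed by multi_apply above

def add_and_mul(x, y, scale=1):
    # Hypothetical helper returning two outputs per input pair.
    return x + y, (x * y) * scale

sums, products = multi_apply(add_and_mul, [1, 2, 3], [4, 5, 6], scale=2)
# sums     == [5, 7, 9]
# products == [8, 20, 36]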

Example 2: _linthompsamp_score

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def _linthompsamp_score(self, context):
        """Thompson Sampling"""
        action_ids = list(six.viewkeys(context))
        context_array = np.asarray([context[action_id]
                                    for action_id in action_ids])
        model = self._model_storage.get_model()
        B = model['B']  # pylint: disable=invalid-name
        mu_hat = model['mu_hat']
        v = self.R * np.sqrt(24 / self.epsilon
                             * self.context_dimension
                             * np.log(1 / self.delta))
        mu_tilde = self.random_state.multivariate_normal(
            mu_hat.flat, v**2 * np.linalg.inv(B))[..., np.newaxis]
        estimated_reward_array = context_array.dot(mu_hat)
        score_array = context_array.dot(mu_tilde)

        estimated_reward_dict = {}
        uncertainty_dict = {}
        score_dict = {}
        for action_id, estimated_reward, score in zip(
                action_ids, estimated_reward_array, score_array):
            estimated_reward_dict[action_id] = float(estimated_reward)
            score_dict[action_id] = float(score)
            uncertainty_dict[action_id] = float(score - estimated_reward)
        return estimated_reward_dict, uncertainty_dict, score_dict 
Developer ID: ntucllab, Project: striatum, Lines of code: 27, Source file: linthompsamp.py
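For reference, the sampling step above draws the parameter vector from a Gaussian centred at the current estimate $\hat{\mu}$ (notation follows the code; $R$, $\epsilon$, $\delta$ are hyperparameters and $d$ the context dimension):

$$v = R\sqrt{\tfrac{24}{\epsilon}\, d \,\ln\tfrac{1}{\delta}}, \qquad \tilde{\mu} \sim \mathcal{N}\big(\hat{\mu},\; v^2 B^{-1}\big).$$

Each action $a$ is then scored by $x_a^\top \tilde{\mu}$, the estimated reward is $x_a^\top \hat{\mu}$, and their difference is reported as the uncertainty.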

Example 3: unroll

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def unroll(self, actions, env_outputs, core_state):
    """Manual implementation of the network unroll."""
    _, _, done, _ = env_outputs

    torso_outputs = snt.BatchApply(self._torso)((actions, env_outputs))

    # Note, in this implementation we can't use CuDNN RNN to speed things up due
    # to the state reset. This can be XLA-compiled (LSTMBlockCell needs to be
    # changed to implement snt.LSTMCell).
    initial_core_state = self._core.zero_state(tf.shape(actions)[1], tf.float32)
    core_output_list = []
    for input_, d in zip(tf.unstack(torso_outputs), tf.unstack(done)):
      # If the episode ended, the core state should be reset before the next step.
      core_state = nest.map_structure(
          functools.partial(tf.where, d), initial_core_state, core_state)
      core_output, core_state = self._core(input_, core_state)
      core_output_list.append(core_output)

    return snt.BatchApply(self._head)(tf.stack(core_output_list)), core_state 
Developer ID: deepmind, Project: streetlearn, Lines of code: 21, Source file: plain_agent.py
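The same unroll-with-state-reset pattern, stripped of the TensorFlow/Sonnet specifics, can be sketched in plain Python (step_fn, inputs, dones and initial_state are hypothetical stand-ins for the core, torso outputs, done flags and zero state above):

def unroll_with_reset(step_fn, inputs, dones, initial_state):
    """Plain-Python sketch of the per-timestep reset performed above."""
    state = initial_state
    outputs = []
    for x, done in zip(inputs, dones):
        if done:
            # The previous episode ended: reset the recurrent state.
            state = initial_state
        out, state = step_fn(x, state)
        outputs.append(out)
    return outputs, state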

Example 4: _level_set_event

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def _level_set_event(values, length, verb):
  """Generates `LevelSetEvent`; see _generate_sequence_event."""
  counts = combinatorics.uniform_non_negative_integers_with_sum(
      len(values), length)
  counts_dict = dict(list(zip(values, counts)))
  event = probability.CountLevelSetEvent(counts_dict)

  shuffled_values = list(values)
  random.shuffle(shuffled_values)

  counts_and_values = [
      '{} {}'.format(counts_dict[value], value)
      for value in shuffled_values
      if counts_dict[value] > 0
  ]
  counts_and_values = _word_series(counts_and_values)
  template = random.choice([
      '{verbing} {counts_and_values}',
  ])
  verbing = _GERUNDS[verb]
  event_description = template.format(
      counts_and_values=counts_and_values, verbing=verbing)
  return event, event_description 
Developer ID: deepmind, Project: mathematics_dataset, Lines of code: 25, Source file: probability.py
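The zip here simply pairs each value with its sampled count before the event is built; a standalone illustration with hypothetical values:

values = ['red', 'green', 'blue']
counts = [2, 0, 1]
counts_dict = dict(zip(values, counts))   # {'red': 2, 'green': 0, 'blue': 1}
counts_and_values = ['{} {}'.format(counts_dict[v], v)
                     for v in values if counts_dict[v] > 0]
# ['2 red', '1 blue']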

Example 5: split

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def split(self, args):
    """Splits the entropy and op counts up."""
    non_integer_count = sum(not arg.is_Integer for arg in args)
    assert non_integer_count <= self.count - 1
    count_split = combinatorics.uniform_non_negative_integers_with_sum(
        len(args), (self.count - 1) - non_integer_count)
    for i, arg in enumerate(args):
      if not arg.is_Integer:
        count_split[i] += 1
    if all(count == 0 for count in count_split):
      assert self.entropy == 0
      entropies = np.zeros(len(count_split))
    else:
      entropies = (
          np.random.dirichlet(np.maximum(1e-9, count_split)) * self.entropy)
    return [_SampleArgs(op_count, entropy)
            for op_count, entropy in zip(count_split, entropies)] 
Developer ID: deepmind, Project: mathematics_dataset, Lines of code: 19, Source file: arithmetic.py
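The final zip pairs each argument's op count with its share of the entropy, where the shares come from a Dirichlet draw; a standalone sketch with hypothetical numbers:

import numpy as np

count_split = [2, 0, 1]          # op counts assigned to each argument
entropy = 6.0
# np.maximum(1e-9, ...) keeps every Dirichlet concentration strictly positive,
# so zero-count slots receive (almost) none of the entropy.
entropies = np.random.dirichlet(np.maximum(1e-9, count_split)) * entropy
pairs = list(zip(count_split, entropies))   # e.g. [(2, 3.9...), (0, 0.0...), (1, 2.1...)]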

Example 6: coefficients_to_polynomial

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def coefficients_to_polynomial(coefficients, variables):
  """Converts array of lists of coefficients to a polynomial."""
  coefficients = np.asarray(coefficients)
  shape = coefficients.shape

  indices = list(zip(*np.indices(shape).reshape([len(shape), -1])))
  monomials = []
  for power in indices:
    coeffs = coefficients.item(power)
    if (number.is_integer_or_rational(coeffs)
        or isinstance(coeffs, sympy.Symbol)):
      coeffs = [coeffs]
    elif not isinstance(coeffs, list):
      raise ValueError('Unrecognized coeffs={} type={}'
                       .format(coeffs, type(coeffs)))
    for coeff in coeffs:
      monomials.append(monomial(coeff, variables, power))
  random.shuffle(monomials)
  return ops.Add(*monomials) 
Developer ID: deepmind, Project: mathematics_dataset, Lines of code: 21, Source file: polynomials.py
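The zip(*np.indices(...).reshape(...)) idiom above enumerates every multi-index (power tuple) of the coefficient array; a quick standalone illustration:

import numpy as np

shape = (2, 3)
indices = list(zip(*np.indices(shape).reshape([len(shape), -1])))
# [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]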

Example 7: inverse

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def inverse(self, event):
    # Specialization for `FiniteProductEvent`; don't need to take all sequences.
    if isinstance(event, FiniteProductEvent):
      assert len(event.events) == len(self._random_variables)
      zipped = list(zip(self._random_variables, event.events))
      return FiniteProductEvent(tuple(
          random_variable.inverse(sub_event)
          for random_variable, sub_event in zipped))

    # Try fallback of mapping each sequence separately.
    try:
      all_sequences = event.all_sequences()
    except AttributeError:
      raise ValueError('Unhandled event type {}'.format(type(event)))

    mapped = set()
    for sequence in all_sequences:
      assert len(sequence) == len(self._random_variables)
      zipped = list(zip(self._random_variables, sequence))
      mapped_sequence = FiniteProductEvent(tuple(
          random_variable.inverse(DiscreteEvent({element}))
          for random_variable, element in zipped))
      mapped.update(mapped_sequence.all_sequences())
    return SequenceEvent(mapped) 
Developer ID: deepmind, Project: mathematics_dataset, Lines of code: 26, Source file: probability.py

Example 8: _apply_updates

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def _apply_updates(self, grad_func):
        qs = self._var_list
        self._define_variables(qs)
        update_ops, infos = self._update(qs, grad_func)

        with tf.control_dependencies([self.t.assign_add(1)]):
            sample_op = tf.group(*update_ops)
        list_attrib = zip(*map(lambda d: six.itervalues(d), infos))
        list_attrib_with_k = map(lambda l: dict(zip(self._latent_k, l)),
                                 list_attrib)
        attrib_names = list(six.iterkeys(infos[0]))
        dict_info = dict(zip(attrib_names, list_attrib_with_k))
        SGMCMCInfo = namedtuple("SGMCMCInfo", attrib_names)
        sgmcmc_info = SGMCMCInfo(**dict_info)

        return sample_op, sgmcmc_info 
Developer ID: thu-ml, Project: zhusuan, Lines of code: 18, Source file: sgmcmc.py
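The chained map/zip calls transpose a list of per-variable info dicts into one dict keyed by attribute name; a pure-Python sketch with hypothetical names makes the reshaping explicit (it assumes, as the code does, that all info dicts share the same key order):

latent_k = ['w', 'z']                               # latent variable names
infos = [{'mean': 0.1, 'var': 0.5},                 # info for latent 'w'
         {'mean': 0.2, 'var': 0.7}]                 # info for latent 'z'

list_attrib = zip(*[d.values() for d in infos])     # one tuple per attribute
list_attrib_with_k = [dict(zip(latent_k, l)) for l in list_attrib]
attrib_names = list(infos[0].keys())
dict_info = dict(zip(attrib_names, list_attrib_with_k))
# {'mean': {'w': 0.1, 'z': 0.2}, 'var': {'w': 0.5, 'z': 0.7}}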

Example 9: update

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def update(self, x):
        # x: (chain_dims data_dims)
        new_t = tf.assign(self.t, self.t + 1)
        weight = (1 - self.decay) / (1 - tf.pow(self.decay, new_t))
        # incr: (chain_dims data_dims)
        incr = [weight * (q - mean) for q, mean in zip(x, self.mean)]
        # mean: (1,...,1 data_dims)
        update_mean = [mean.assign_add(
            tf.reduce_mean(i, axis=self.chain_axes, keepdims=True))
            for mean, i in zip(self.mean, incr)]
        # var: (1,...,1 data_dims)
        new_var = [
            (1 - weight) * var +
            tf.reduce_mean(i * (q - mean), axis=self.chain_axes,
                           keepdims=True)
            for var, i, q, mean in zip(self.var, incr, x, update_mean)]

        update_var = [tf.assign(var, n_var)
                      for var, n_var in zip(self.var, new_var)]
        return update_var 
Developer ID: thu-ml, Project: zhusuan, Lines of code: 22, Source file: hmc.py
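Written out, the update is a bias-corrected exponential moving estimate of the per-dimension mean and variance over the chain axes (notation follows the code, with $q$ a sample, $\gamma$ the decay, and an overline denoting the mean over the chain axes):

$$w_t = \frac{1-\gamma}{1-\gamma^{t}}, \quad \text{incr} = w_t\,(q-\text{mean}_{t-1}), \quad \text{mean}_t = \text{mean}_{t-1} + \overline{\text{incr}}, \quad \text{var}_t = (1-w_t)\,\text{var}_{t-1} + \overline{\text{incr}\,(q-\text{mean}_t)}.$$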

Example 10: build_bnn

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def build_bnn(x, layer_sizes, n_particles):
    bn = zs.BayesianNet()
    h = tf.tile(x[None, ...], [n_particles, 1, 1])
    for i, (n_in, n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        w = bn.normal("w" + str(i), tf.zeros([n_out, n_in + 1]), std=1.,
                      group_ndims=2, n_samples=n_particles)
        h = tf.concat([h, tf.ones(tf.shape(h)[:-1])[..., None]], -1)
        h = tf.einsum("imk,ijk->ijm", w, h) / tf.sqrt(
            tf.cast(tf.shape(h)[2], tf.float32))
        if i < len(layer_sizes) - 2:
            h = tf.nn.relu(h)

    y_mean = bn.deterministic("y_mean", tf.squeeze(h, 2))
    y_logstd = tf.get_variable("y_logstd", shape=[],
                               initializer=tf.constant_initializer(0.))
    bn.normal("y", y_mean, logstd=y_logstd)
    return bn 
Developer ID: thu-ml, Project: zhusuan, Lines of code: 19, Source file: bnn_vi.py
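Each layer above folds the bias into the weight matrix by appending a constant-1 feature and rescales by the fan-in; per particle, the hidden activation is roughly

$$h^{(l+1)} = \frac{W^{(l)}\,[\,h^{(l)};\,1\,]}{\sqrt{n_{\text{in}}+1}}, \qquad W^{(l)}_{mk} \sim \mathcal{N}(0,\,1),$$

with a ReLU between hidden layers and a Gaussian observation model for y with mean y_mean and standard deviation $e^{y_\text{logstd}}$.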

Example 11: build_bnn

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def build_bnn(x, layer_sizes, logstds, n_particles):
    bn = zs.BayesianNet()
    h = tf.tile(x[None, ...], [n_particles, 1, 1])
    for i, (n_in, n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        w = bn.normal("w" + str(i), tf.zeros([n_out, n_in + 1]),
                      logstd=logstds[i], group_ndims=2, n_samples=n_particles)
        h = tf.concat([h, tf.ones(tf.shape(h)[:-1])[..., None]], -1)
        h = tf.einsum("imk,ijk->ijm", w, h) / tf.sqrt(
            tf.cast(tf.shape(h)[2], tf.float32))
        if i < len(layer_sizes) - 2:
            h = tf.nn.relu(h)

    y_mean = bn.deterministic("y_mean", tf.squeeze(h, 2))
    y_logstd = -0.95
    bn.normal("y", y_mean, logstd=y_logstd)
    return bn 
Developer ID: thu-ml, Project: zhusuan, Lines of code: 18, Source file: bnn_sgmcmc.py

Example 12: _TransformerMultiSourceInputs

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def _TransformerMultiSourceInputs(self, depth=3, dtype=tf.float32):
    np.random.seed(NUMPY_RANDOM_SEED)
    src_names = ['en1', 'en2', 'de']
    slens = [11, 10, 9]
    sbatch = 3
    tlen = 5
    source_vecs = tf.constant(
        np.random.uniform(size=(tlen, sbatch*2, depth)), dtype)
    source_padding = tf.constant(np.zeros([tlen, sbatch*2, 1]), dtype)
    aux_source_vecs = py_utils.NestedMap()
    aux_source_paddings = py_utils.NestedMap()
    for slen, sname in zip(slens, src_names):
      aux_source_vecs[sname] = tf.constant(
          np.random.uniform(size=[slen, sbatch, depth]), dtype)
      aux_source_paddings[sname] = tf.constant(np.zeros([slen, sbatch]), dtype)
    return (source_vecs, source_padding, aux_source_vecs, aux_source_paddings) 
Developer ID: tensorflow, Project: lingvo, Lines of code: 18, Source file: layers_test.py

Example 13: _Placeholders

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def _Placeholders(self):
    """Return a NestedMap of placeholders to fill in for inference.

    Runs the configured input pipeline to generate the expected shapes and types
    of the inputs.

    Returns:
      A NestedMap of placeholders matching the input structure of
      the inference model.
    """
    p = self.params
    with tf.Graph().as_default():
      inputs = self.params.input.Instantiate()

    # Turn those inputs into placeholders.
    placeholders = []
    for input_shape, dtype in zip(inputs.Shape().Flatten(),
                                  inputs.DType().Flatten()):
      batched_input_shape = [p.inference_batch_size] + input_shape.as_list()
      placeholders.append(tf.placeholder(dtype, batched_input_shape))

    result = inputs.DType().Pack(placeholders)
    return result 
Developer ID: tensorflow, Project: lingvo, Lines of code: 25, Source file: point_detector.py

Example 14: _ParMap

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def _ParMap(self, name, key_to_sub):
    """Perform parallel layers and create a NestedMap from the outputs.

    Parallel branches on an input `NestedMap`. Each branch should expect the
    same `NestedMap` as input; each branch's output will be mapped to the
    specified key in key_to_sub.

    Args:
      name: String layer name.
      key_to_sub: Dictionary mapping keys to sub params. Each sub should expect
        a NestedMap input.

    Returns:
      Params for this layer.
    """
    sorted_keys = sorted(key_to_sub.keys())
    sorted_subs = [key_to_sub[k] for k in sorted_keys]

    def _MakeNestedMap(*vals):
      return py_utils.NestedMap(dict(zip(sorted_keys, vals)))

    return self._ApplyFnMulti(name, _MakeNestedMap, *sorted_subs) 
Developer ID: tensorflow, Project: lingvo, Lines of code: 24, Source file: builder_lib.py

Example 15: ExportKITTIDetection

# Required import: from six import moves [as alias]
# Or: from six.moves import zip [as alias]
def ExportKITTIDetection(out_dir, source_id, location_cam, dimension_cam,
                         rotation_cam, bboxes_2d, scores, class_name, is_first):
  """Write detections to a text file in KITTI format."""
  tf.logging.info("Exporting %s for %s" % (class_name, source_id))
  fname = out_dir + "/" + source_id + ".txt"
  with tf.io.gfile.GFile(fname, "a") as fid:
    # Ensure we always create a file even when there's no detection.
    # TODO(shlens): Test whether this is actually necessary on the KITTI
    # eval server.
    if is_first:
      fid.write("")
    for location, dimension, ry, bbox_2d, score in zip(
        location_cam, dimension_cam, rotation_cam, bboxes_2d, scores):
      if score < FLAGS.score_threshold:
        continue
      # class_name, truncated(ignore), alpha(ignore), bbox2D x 4
      part1 = [class_name, -1, -1, -10] + list(bbox_2d)
      # dimension x 3, location x 3, rotation_y x 1, score x 1
      fill = tuple(part1 + list(dimension) + list(location) + [ry] + [score])
      kitti_format_string = ("%s %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf %lf "
                             "%lf %lf %lf %lf")
      kitti_line = kitti_format_string % fill
      fid.write(kitti_line + "\n") 
Developer ID: tensorflow, Project: lingvo, Lines of code: 25, Source file: export_kitti_detection.py


Note: The six.moves.zip method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce this article without permission.