

Python tensorflow.Placeholder Method Code Examples

This article collects typical usage examples of the tensorflow.Placeholder method in Python. If you are wondering how exactly to use tensorflow.Placeholder, or are looking for working examples of it, the curated code examples below may help. You can also explore further usage examples from the tensorflow module this method belongs to.


Seven code examples of the tensorflow.Placeholder method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
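Before the examples, here is a minimal sketch of the basic tf.placeholder / feed_dict pattern (TensorFlow 1.x graph mode) that all of the examples below build on; the tensor name, shape, and values are illustrative only.

import tensorflow as tf

# A float32 placeholder whose batch dimension is left dynamic (None).
x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
y = x * 2.0

with tf.Session() as sess:
    # Concrete values are supplied at run time through feed_dict.
    print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0]]}))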

Example 1: eran_input

# Required module: import tensorflow [as alias]
# Or: from tensorflow import Placeholder [as alias]
def eran_input(shape, name=None):
    """
    adds a tf.Placeholder to the graph. The shape will be augmented with None at the beginning as batch size
    
    Arguments
    ---------
    shape : list or tuple
        the shape of the Placeholder, has 1 to 3 entries
    name : str
        optional name for the Placeholder operation  
    
    Return
    ------
    output : tf.Tensor
        tensor associated with the Placeholder operation
    """
    assert len(shape) < 4, "shape should have less than 4 entries (batch size is taken care of)"
    batch_shape = [None]
    for s in shape:
        batch_shape.append(s)
    
    return tf.placeholder(tf.float64, batch_shape, name=name) 
Developer: eth-sri, Project: eran, Lines: 24, Source file: eranlayers.py
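A brief usage sketch for eran_input as defined above; the shape and tensor name here are illustrative, not taken from the original project.

# Build a float64 placeholder for 28x28x1 images; the leading batch dimension is added automatically.
image_pl = eran_input([28, 28, 1], name="eran_image")
print(image_pl.shape)  # (?, 28, 28, 1)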

Example 2: export

# Required module: import tensorflow [as alias]
# Or: from tensorflow import Placeholder [as alias]
def export(sess, input_pl, output_tensor, input_file_pattern, output_dir):
  """Exports inference outputs to an output directory.

  Args:
    sess: tf.Session with variables already loaded.
    input_pl: tf.Placeholder for input (HWC format).
    output_tensor: Tensor for generated output images.
    input_file_pattern: Glob file pattern for input images.
    output_dir: Output directory.
  """
  if output_dir:
    _make_dir_if_not_exists(output_dir)

  if input_file_pattern:
    for file_path in tf.gfile.Glob(input_file_pattern):
      # Grab a single image and run it through inference
      input_np = np.asarray(PIL.Image.open(file_path))
      output_np = sess.run(output_tensor, feed_dict={input_pl: input_np})
      image_np = data_provider.undo_normalize_image(output_np)
      output_path = _file_output_path(output_dir, file_path)
      PIL.Image.fromarray(image_np).save(output_path) 
Developer: itsamitgoel, Project: Gun-Detector, Lines: 23, Source file: inference_demo.py
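A hedged sketch of how export might be wired up. The placeholder dtype, the build_generator function, the checkpoint path, and the glob pattern below are all assumptions for illustration, not part of the original inference_demo.py.

input_pl = tf.placeholder(tf.float32, shape=[None, None, 3], name="input_image")  # single HWC image (assumed dtype)
output_tensor = build_generator(input_pl)  # assumed model-building function
saver = tf.train.Saver()

with tf.Session() as sess:
  saver.restore(sess, "/path/to/checkpoint")  # illustrative checkpoint path
  export(sess, input_pl, output_tensor, "input_images/*.png", "generated/")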

Example 3: build_feature_placeholders

# Required module: import tensorflow [as alias]
# Or: from tensorflow import Placeholder [as alias]
def build_feature_placeholders(config):
  """Builds tf.Placeholder ops for feeding model features and labels.

  Args:
    config: ConfigDict containing the feature configurations.

  Returns:
    features: A dictionary containing "time_series_features" and "aux_features",
        each of which is a dictionary of tf.Placeholders of features from the
        input configuration. All features have dtype float32 and shape
        [batch_size, length].
  """
  batch_size = None  # Batch size will be dynamically specified.
  features = {"time_series_features": {}, "aux_features": {}}
  for feature_name, feature_spec in config.items():
    placeholder = tf.placeholder(
        dtype=tf.float32,
        shape=[batch_size, feature_spec.length],
        name=feature_name)

    if feature_spec.is_time_series:
      features["time_series_features"][feature_name] = placeholder
    else:
      features["aux_features"][feature_name] = placeholder

  return features 
Developer: google-research, Project: exoplanet-ml, Lines: 28, Source file: input_ops.py
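An illustrative call, using a namedtuple as a stand-in for the project's ConfigDict entries (each feature spec only needs .length and .is_time_series here); the feature names are hypothetical.

import collections

FeatureSpec = collections.namedtuple("FeatureSpec", ["length", "is_time_series"])

config = {
    "global_view": FeatureSpec(length=2001, is_time_series=True),
    "star_mass": FeatureSpec(length=1, is_time_series=False),
}
features = build_feature_placeholders(config)
# features["time_series_features"]["global_view"] has shape [None, 2001];
# features["aux_features"]["star_mass"] has shape [None, 1].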

Example 4: build_labels_placeholder

# Required module: import tensorflow [as alias]
# Or: from tensorflow import Placeholder [as alias]
def build_labels_placeholder():
  """Builds a tf.Placeholder op for feeding model labels.

  Returns:
    labels: An int64 tf.Placeholder with shape [batch_size].
  """
  batch_size = None  # Batch size will be dynamically specified.
  return tf.placeholder(dtype=tf.int64, shape=[batch_size], name="labels") 
Developer: google-research, Project: exoplanet-ml, Lines: 10, Source file: input_ops.py
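A short, hypothetical feeding example for the labels placeholder; the label values are made up.

labels_pl = build_labels_placeholder()        # int64 placeholder with shape [None]
feed_dict = {labels_pl: [0, 1, 1, 0]}         # any batch of integer labels supplied at run time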

Example 5: get_deterministic_network_move

# Required module: import tensorflow [as alias]
# Or: from tensorflow import Placeholder [as alias]
def get_deterministic_network_move(session, input_layer, output_layer, board_state, side, valid_only=False,
                                   game_spec=None):
    """Choose a move for the given board_state using a deterministic policy. A move is selected using the values from
    the output_layer and selecting the move with the highest score.

    Args:
        session (tf.Session): Session used to run this network
        input_layer (tf.Placeholder): Placeholder to the network used to feed in the board_state
        output_layer (tf.Tensor): Tensor that will output the probabilities of the moves, we expect this to be of
            dimesensions (None, board_squares).
        board_state: The board_state we want to get the move for.
        side: The side that is making the move.

    Returns:
        (np.array) It's shape is (board_squares), and it is a 1 hot encoding for the move the network has chosen.
    """
    np_board_state = np.array(board_state)
    np_board_state = np_board_state.reshape(1, *input_layer.get_shape().as_list()[1:])
    if side == -1:
        np_board_state = -np_board_state

    probability_of_actions = session.run(output_layer,
                                         feed_dict={input_layer: np_board_state})[0]

    if valid_only:
        available_moves = game_spec.available_moves(board_state)
        available_moves_flat = [game_spec.tuple_move_to_flat(x) for x in available_moves]
        for i in range(game_spec.board_squares()):
            if i not in available_moves_flat:
                probability_of_actions[i] = 0

    move = np.argmax(probability_of_actions)
    one_hot = np.zeros(len(probability_of_actions))
    one_hot[move] = 1.
    return one_hot 
Developer: DanielSlater, Project: AlphaToe, Lines: 37, Source file: network_helpers.py
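A hypothetical call for a 3x3 tic-tac-toe board; input_layer, output_layer, and tic_tac_toe_game_spec are assumed to come from the surrounding AlphaToe code and are not defined here.

board_state = ((0, 0, 0),
               (0, 1, 0),
               (0, 0, -1))

with tf.Session() as session:
    session.run(tf.global_variables_initializer())
    move = get_deterministic_network_move(session, input_layer, output_layer,
                                          board_state, side=1,
                                          valid_only=True, game_spec=tic_tac_toe_game_spec)
    # move is a one-hot numpy array of length board_squares (9 here)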

Example 6: graph_gather

# Required module: import tensorflow [as alias]
# Or: from tensorflow import Placeholder [as alias]
def graph_gather(atoms, membership_placeholder, batch_size):
  """
  Parameters
  ----------
  atoms: tf.Tensor
    Of shape (n_atoms, n_feat)
  membership_placeholder: tf.Placeholder
    Of shape (n_atoms,). Index of the molecule each atom belongs to.
  batch_size: int
    Batch size for deep model.

  Returns
  -------
  tf.Tensor
    Of shape (batch_size, n_feat)
  """

  # WARNING: Does not work for Batch Size 1! If batch_size = 1, then use reduce_sum!
  assert batch_size > 1, "graph_gather requires batches larger than 1"

  # Obtain the partitions for each of the molecules
  activated_par = tf.dynamic_partition(atoms, membership_placeholder,
                                       batch_size)

  # Sum over atoms for each molecule
  sparse_reps = [
      tf.reduce_sum(activated, 0, keep_dims=True) for activated in activated_par
  ]

  # Get the final sparse representations
  sparse_reps = tf.concat(axis=0, values=sparse_reps)

  return sparse_reps 
Developer: ZJULearning, Project: graph_level_drug_discovery, Lines: 35, Source file: layers.py
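An illustrative shape setup for graph_gather; the atom feature size (75) and batch size are made up for this sketch.

atoms = tf.placeholder(tf.float32, shape=[None, 75])          # (n_atoms, n_feat)
membership = tf.placeholder(tf.int32, shape=[None])           # molecule index of each atom
mol_features = graph_gather(atoms, membership, batch_size=4)  # -> shape (4, 75)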

Example 7: get_stochastic_network_move

# Required module: import tensorflow [as alias]
# Or: from tensorflow import Placeholder [as alias]
def get_stochastic_network_move(session, input_layer, output_layer, board_state, side,
                                valid_only=False, game_spec=None):
    """Choose a move for the given board_state using a stocastic policy. A move is selected using the values from the
     output_layer as a categorical probability distribution to select a single move

    Args:
        session (tf.Session): Session used to run this network
        input_layer (tf.Placeholder): Placeholder to the network used to feed in the board_state
        output_layer (tf.Tensor): Tensor that will output the probabilities of the moves, we expect this to be of
            dimesensions (None, board_squares) and the sum of values across the board_squares to be 1.
        board_state: The board_state we want to get the move for.
        side: The side that is making the move.

    Returns:
        (np.array) It's shape is (board_squares), and it is a 1 hot encoding for the move the network has chosen.
    """
    np_board_state = np.array(board_state)
    if side == -1:
        np_board_state = -np_board_state

    np_board_state = np_board_state.reshape(1, *input_layer.get_shape().as_list()[1:])
    probability_of_actions = session.run(output_layer,
                                         feed_dict={input_layer: np_board_state})[0]

    if valid_only:
        available_moves = list(game_spec.available_moves(board_state))
        if len(available_moves) == 1:
            move = np.zeros(game_spec.board_squares())
            np.put(move, game_spec.tuple_move_to_flat(available_moves[0]), 1)
            return move
        available_moves_flat = [game_spec.tuple_move_to_flat(x) for x in available_moves]
        for i in range(game_spec.board_squares()):
            if i not in available_moves_flat:
                probability_of_actions[i] = 0.

        prob_mag = sum(probability_of_actions)
        if prob_mag != 0.:
            probability_of_actions /= prob_mag

    try:
        move = np.random.multinomial(1, probability_of_actions)
    except ValueError:
        # sometimes because of rounding errors we end up with probability_of_actions summing to greater than 1.
        # so need to reduce slightly to be a valid value
        move = np.random.multinomial(1, probability_of_actions / (1. + 1e-6))

    return move 
Developer: DanielSlater, Project: AlphaToe, Lines: 49, Source file: network_helpers.py
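The calling convention matches the deterministic variant in Example 5; the difference is that the returned one-hot move is sampled from the network's probability distribution rather than taken as the argmax. Reusing the (assumed) names from that sketch:

move = get_stochastic_network_move(session, input_layer, output_layer, board_state,
                                   side=1, valid_only=True, game_spec=tic_tac_toe_game_spec)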


Note: The tensorflow.Placeholder examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.