

Python TensorflowGraph.get_placeholder_scope Method Code Examples

This article collects typical usage examples of the Python method deepchem.models.tensorflow_models.TensorflowGraph.get_placeholder_scope. If you are wondering how TensorflowGraph.get_placeholder_scope is used in practice, the curated examples below should help. You can also browse further usage examples of the containing class, deepchem.models.tensorflow_models.TensorflowGraph.


The following presents 10 code examples of the TensorflowGraph.get_placeholder_scope method, sorted by popularity by default.
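
All of the examples below share the same call pattern: TensorflowGraph.get_placeholder_scope(graph, name_scopes) returns a name scope for the model's input placeholders, and the placeholders are then created inside that scope (and inside graph.as_default()). The snippet below is a minimal sketch of that pattern distilled from the examples, assuming deepchem's legacy TF 1.x graph models; the graph/name_scopes setup and the 1024-dimensional feature size are illustrative assumptions, not values taken from any specific example.

import tensorflow as tf
from deepchem.models.tensorflow_models import TensorflowGraph

graph = tf.Graph()
name_scopes = {}  # assumption: a dict shared across calls so that scopes are reused

placeholder_scope = TensorflowGraph.get_placeholder_scope(graph, name_scopes)
with graph.as_default():
  with placeholder_scope:
    # Placeholders created here live under the shared placeholder name scope.
    mol_features = tf.placeholder(
        tf.float32, shape=[None, 1024], name='mol_features')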

Example 1: build

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture of model: n_tasks * sigmoid nodes.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.
    """
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    n_features = self.n_features
    with graph.as_default():
      with placeholder_scope:
        self.mol_features = tf.placeholder(
            tf.float32,
            shape=[None, n_features],
            name='mol_features')

      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      lg_list = []
      for task in range(self.n_tasks):
        # Set up n_tasks output nodes, one per task.
        lg = model_ops.fully_connected_layer(
            tensor=self.mol_features,
            size=1,
            weight_init=tf.truncated_normal(
                shape=[self.n_features, 1],
                stddev=weight_init_stddevs[0]),
            bias_init=tf.constant(value=bias_init_consts[0],
                                  shape=[1]))
        lg_list.append(lg)

    return lg_list
Developer: bowenliu16, Project: deepchem, Lines: 35, Source: lr.py

Example 2: build

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.
    """
    n_features = self.n_features
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    with graph.as_default():
      with placeholder_scope:
        self.mol_features = tf.placeholder(
            tf.float32,
            shape=[None, n_features],
            name='mol_features')

      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts
      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
          len(bias_init_consts),
          len(dropouts),
          }
      assert len(lengths_set) == 1, 'All layer params must have same length.'
      n_layers = lengths_set.pop()
      assert n_layers > 0, 'Must have some layers defined.'

      prev_layer = self.mol_features
      prev_layer_size = n_features 
      for i in range(n_layers):
        layer = tf.nn.relu(model_ops.fully_connected_layer(
            tensor=prev_layer,
            size=layer_sizes[i],
            weight_init=tf.truncated_normal(
                shape=[prev_layer_size, layer_sizes[i]],
                stddev=weight_init_stddevs[i]),
            bias_init=tf.constant(value=bias_init_consts[i],
                                  shape=[layer_sizes[i]])))
        layer = model_ops.dropout(layer, dropouts[i])
        prev_layer = layer
        prev_layer_size = layer_sizes[i]

      output = []
      for task in range(self.n_tasks):
        output.append(tf.squeeze(
            model_ops.fully_connected_layer(
                tensor=prev_layer,
                size=1,
                weight_init=tf.truncated_normal(
                    shape=[prev_layer_size, 1],
                    stddev=weight_init_stddevs[i]),
                bias_init=tf.constant(value=bias_init_consts[i],
                                      shape=[1]))))
      return output
Developer: apappu97, Project: deepchem, Lines: 60, Source: fcnet.py

Example 3: add_label_placeholders

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
 def add_label_placeholders(self, graph, name_scopes):
   # Label placeholders, each with shape batch_size x 1.
   labels = []
   placeholder_scope = TensorflowGraph.get_placeholder_scope(graph, name_scopes)
   with placeholder_scope:
     for task in range(self.n_tasks):
       labels.append(tf.identity(
           tf.placeholder(tf.float32, shape=[None,1],
                          name='labels_%d' % task)))
   return labels
Developer: bowenliu16, Project: deepchem, Lines: 12, Source: lr.py

Example 4: build

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture of model: n_tasks * sigmoid nodes.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.
    """
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    n_features = self.n_features
    with graph.as_default():
      with placeholder_scope:
        mol_features = tf.placeholder(
            tf.float32, shape=[None, n_features], name='mol_features')

      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      lg_list = []

      label_placeholders = self.add_label_placeholders(graph, name_scopes)
      weight_placeholders = self.add_example_weight_placeholders(graph,
                                                                 name_scopes)
      if training:
        graph.queue = tf.FIFOQueue(
            capacity=5,
            dtypes=[tf.float32] *
            (len(label_placeholders) + len(weight_placeholders) + 1))
        graph.enqueue = graph.queue.enqueue([mol_features] + label_placeholders
                                            + weight_placeholders)
        queue_outputs = graph.queue.dequeue()
        labels = queue_outputs[1:len(label_placeholders) + 1]
        weights = queue_outputs[len(label_placeholders) + 1:]
        prev_layer = queue_outputs[0]
      else:
        labels = label_placeholders
        weights = weight_placeholders
        prev_layer = mol_features

      for task in range(self.n_tasks):
        # Set up n_tasks output nodes, one per task.
        lg = model_ops.fully_connected_layer(
            tensor=prev_layer,
            size=1,
            weight_init=tf.truncated_normal(
                shape=[self.n_features, 1], stddev=weight_init_stddevs[0]),
            bias_init=tf.constant(value=bias_init_consts[0], shape=[1]))
        lg_list.append(lg)
    return (lg_list, labels, weights)
Developer: joegomes, Project: deepchem, Lines: 50, Source: lr.py
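
Examples 4, 5, 8, and 9 pair the placeholder scope with a tf.FIFOQueue during training: the placeholders are enqueued together, and the dequeued tensors are recovered purely by position (features first, then the per-task labels, then the per-task weights). Below is a minimal sketch of just that slicing logic, assuming TF 1.x; n_tasks and n_features are illustrative values rather than parameters from any of the examples.

import tensorflow as tf

n_tasks = 3        # illustrative task count
n_features = 1024  # illustrative feature size

mol_features = tf.placeholder(
    tf.float32, shape=[None, n_features], name='mol_features')
label_placeholders = [
    tf.placeholder(tf.float32, shape=[None, 1], name='labels_%d' % task)
    for task in range(n_tasks)]
weight_placeholders = [
    tf.placeholder(tf.float32, shape=[None], name='weights_%d' % task)
    for task in range(n_tasks)]

# One queue component per enqueued tensor: features + all labels + all weights.
queue = tf.FIFOQueue(
    capacity=5,
    dtypes=[tf.float32] *
    (len(label_placeholders) + len(weight_placeholders) + 1))
enqueue = queue.enqueue(
    [mol_features] + label_placeholders + weight_placeholders)

# dequeue() yields the components in enqueue order, so they can be
# recovered by index: features first, then labels, then weights.
queue_outputs = queue.dequeue()
features = queue_outputs[0]
labels = queue_outputs[1:len(label_placeholders) + 1]
weights = queue_outputs[len(label_placeholders) + 1:]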

Example 5: build

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture of IRV as described in:
       
       https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2750043/
    """
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    K = self.K
    with graph.as_default():
      output = []
      with placeholder_scope:
        mol_features = tf.placeholder(
            tf.float32, shape=[None, self.n_features], name='mol_features')
      with tf.name_scope('variable'):
        V = tf.Variable(tf.constant([0.01, 1.]), name="vote", dtype=tf.float32)
        W = tf.Variable(tf.constant([1., 1.]), name="w", dtype=tf.float32)
        b = tf.Variable(tf.constant([0.01]), name="b", dtype=tf.float32)
        b2 = tf.Variable(tf.constant([0.01]), name="b2", dtype=tf.float32)

      label_placeholders = self.add_label_placeholders(graph, name_scopes)
      weight_placeholders = self.add_example_weight_placeholders(graph,
                                                                 name_scopes)
      if training:
        graph.queue = tf.FIFOQueue(
            capacity=5,
            dtypes=[tf.float32] *
            (len(label_placeholders) + len(weight_placeholders) + 1))
        graph.enqueue = graph.queue.enqueue([mol_features] + label_placeholders
                                            + weight_placeholders)
        queue_outputs = graph.queue.dequeue()
        labels = queue_outputs[1:len(label_placeholders) + 1]
        weights = queue_outputs[len(label_placeholders) + 1:]
        features = queue_outputs[0]
      else:
        labels = label_placeholders
        weights = weight_placeholders
        features = mol_features

      for count in range(self.n_tasks):
        similarity = features[:, 2 * K * count:(2 * K * count + K)]
        ys = tf.to_int32(features[:, (2 * K * count + K):2 * K * (count + 1)])
        R = b + W[0] * similarity + W[1] * tf.constant(
            np.arange(K) + 1, dtype=tf.float32)
        R = tf.sigmoid(R)
        z = tf.reduce_sum(R * tf.gather(V, ys), axis=1) + b2
        output.append(tf.reshape(z, shape=[-1, 1]))
    return (output, labels, weights)
Developer: joegomes, Project: deepchem, Lines: 49, Source: IRV.py

Example 6: add_placeholders

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
 def add_placeholders(self, graph, name_scopes):
   """Adds all placeholders for this model."""
   # Create placeholders
   placeholder_scope = TensorflowGraph.get_placeholder_scope(
       graph, name_scopes)
   labels, weights = [], []
   n_features = self.n_features
   with placeholder_scope:
     self.mol_features = tf.placeholder(
         tf.float32,
         shape=[None, n_features],
         name='mol_features')
     for task in range(self.n_tasks):
       weights.append(tf.identity(
           tf.placeholder(tf.float32, shape=[None,],
                          name='weights_%d' % task)))
       labels.append(tf.identity(
           tf.placeholder(tf.float32, shape=[None,],
                          name='labels_%d' % task)))
   return self.mol_features, labels, weights
Developer: deepchem, Project: deepchem, Lines: 22, Source: progressive_multitask.py

Example 7: add_progressive_lattice

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
  def add_progressive_lattice(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.
    """
    n_features = self.n_features
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    with graph.as_default():
      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts
      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
          len(bias_init_consts),
          len(dropouts),
      }
      assert len(lengths_set) == 1, 'All layer params must have same length.'
      n_layers = lengths_set.pop()
      assert n_layers > 0, 'Must have some layers defined.'

      prev_layer = self.mol_features
      prev_layer_size = n_features
      all_layers = {}
      for i in range(n_layers):
        for task in range(self.n_tasks):
          task_scope = TensorflowGraph.shared_name_scope("task%d_ops" % task,
                                                         graph, name_scopes)
          print("Adding weights for task %d, layer %d" % (task, i))
          with task_scope as scope:
            if i == 0:
              prev_layer = self.mol_features
              prev_layer_size = self.n_features
            else:
              prev_layer = all_layers[(i - 1, task)]
              prev_layer_size = layer_sizes[i - 1]
              if task > 0:
                lateral_contrib = self.add_adapter(all_layers, task, i)
            print("Creating W_layer_%d_task%d of shape %s" %
                  (i, task, str([prev_layer_size, layer_sizes[i]])))
            W = tf.Variable(
                tf.truncated_normal(
                    shape=[prev_layer_size, layer_sizes[i]],
                    stddev=self.weight_init_stddevs[i]),
                name='W_layer_%d_task%d' % (i, task),
                dtype=tf.float32)
            print("Creating b_layer_%d_task%d of shape %s" %
                  (i, task, str([layer_sizes[i]])))
            b = tf.Variable(
                tf.constant(
                    value=self.bias_init_consts[i], shape=[layer_sizes[i]]),
                name='b_layer_%d_task%d' % (i, task),
                dtype=tf.float32)
            layer = tf.matmul(prev_layer, W) + b
            if i > 0 and task > 0:
              layer = layer + lateral_contrib
            layer = tf.nn.relu(layer)
            layer = model_ops.dropout(layer, dropouts[i], training)
            all_layers[(i, task)] = layer

      output = []
      for task in range(self.n_tasks):
        prev_layer = all_layers[(i, task)]
        prev_layer_size = layer_sizes[i]
        task_scope = TensorflowGraph.shared_name_scope("task%d" % task, graph,
                                                       name_scopes)
        with task_scope as scope:
          if task > 0:
            lateral_contrib = tf.squeeze(
                self.add_adapter(all_layers, task, i + 1))
          weight_init = tf.truncated_normal(
              shape=[prev_layer_size, 1], stddev=weight_init_stddevs[i])
          bias_init = tf.constant(value=bias_init_consts[i], shape=[1])
          print("Creating W_output_task%d of shape %s" %
                (task, str([prev_layer_size, 1])))
          w = tf.Variable(
              weight_init, name='W_output_task%d' % task, dtype=tf.float32)
          print("Creating b_output_task%d of shape %s" % (task, str([1])))
          b = tf.Variable(
              bias_init, name='b_output_task%d' % task, dtype=tf.float32)
          layer = tf.squeeze(tf.matmul(prev_layer, w) + b)
          if i > 0 and task > 0:
            layer = layer + lateral_contrib
          output.append(layer)

      return output
Developer: joegomes, Project: deepchem, Lines: 92, Source: progressive_multitask.py

Example 8: build

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x num_features.
    """
    num_features = self.n_features
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    with graph.as_default():
      with placeholder_scope:
        mol_features = tf.placeholder(
            tf.float32, shape=[None, num_features], name='mol_features')

      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts

      bypass_layer_sizes = self.bypass_layer_sizes
      bypass_weight_init_stddevs = self.bypass_weight_init_stddevs
      bypass_bias_init_consts = self.bypass_bias_init_consts
      bypass_dropouts = self.bypass_dropouts

      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
          len(bias_init_consts),
          len(dropouts),
      }
      assert len(lengths_set) == 1, "All layer params must have same length."
      num_layers = lengths_set.pop()
      assert num_layers > 0, "Must have some layers defined."

      bypass_lengths_set = {
          len(bypass_layer_sizes),
          len(bypass_weight_init_stddevs),
          len(bypass_bias_init_consts),
          len(bypass_dropouts),
      }
      assert len(bypass_lengths_set) == 1, (
          "All bypass_layer params must have same length.")
      num_bypass_layers = bypass_lengths_set.pop()

      label_placeholders = self.add_label_placeholders(graph, name_scopes)
      weight_placeholders = self.add_example_weight_placeholders(graph,
                                                                 name_scopes)
      if training:
        graph.queue = tf.FIFOQueue(
            capacity=5,
            dtypes=[tf.float32] *
            (len(label_placeholders) + len(weight_placeholders) + 1))
        graph.enqueue = graph.queue.enqueue([mol_features] + label_placeholders
                                            + weight_placeholders)
        queue_outputs = graph.queue.dequeue()
        labels = queue_outputs[1:len(label_placeholders) + 1]
        weights = queue_outputs[len(label_placeholders) + 1:]
        prev_layer = queue_outputs[0]
      else:
        labels = label_placeholders
        weights = weight_placeholders
        prev_layer = mol_features

      top_layer = prev_layer
      prev_layer_size = num_features
      for i in range(num_layers):
        # layer has shape [None, layer_sizes[i]]
        print("Adding weights of shape %s" %
              str([prev_layer_size, layer_sizes[i]]))
        layer = tf.nn.relu(
            model_ops.fully_connected_layer(
                tensor=prev_layer,
                size=layer_sizes[i],
                weight_init=tf.truncated_normal(
                    shape=[prev_layer_size, layer_sizes[i]],
                    stddev=weight_init_stddevs[i]),
                bias_init=tf.constant(
                    value=bias_init_consts[i], shape=[layer_sizes[i]])))
        layer = model_ops.dropout(layer, dropouts[i], training)
        prev_layer = layer
        prev_layer_size = layer_sizes[i]

      output = []
      # top_multitask_layer has shape [None, layer_sizes[-1]]
      top_multitask_layer = prev_layer
      for task in range(self.n_tasks):
        # TODO(rbharath): Might want to make it feasible to have multiple
        # bypass layers.
        # Construct task bypass layer
        prev_bypass_layer = top_layer
        prev_bypass_layer_size = num_features
        for i in range(num_bypass_layers):
          # bypass_layer has shape [None, bypass_layer_sizes[i]]
          print("Adding bypass weights of shape %s" %
                str([prev_bypass_layer_size, bypass_layer_sizes[i]]))
          bypass_layer = tf.nn.relu(
              model_ops.fully_connected_layer(
                  tensor=prev_bypass_layer,
                  size=bypass_layer_sizes[i],
#......... (some code omitted here) .........
Developer: joegomes, Project: deepchem, Lines: 103, Source: robust_multitask.py

Example 9: build

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x n_features.
    """
    placeholder_scope = TensorflowGraph.get_placeholder_scope(graph,
                                                              name_scopes)
    n_features = self.n_features
    with graph.as_default():
      with placeholder_scope:
        mol_features = tf.placeholder(
            tf.float32, shape=[None, n_features], name='mol_features')

      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts
      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
          len(bias_init_consts),
          len(dropouts),
      }
      assert len(lengths_set) == 1, 'All layer params must have same length.'
      n_layers = lengths_set.pop()
      assert n_layers > 0, 'Must have some layers defined.'

      label_placeholders = self.add_label_placeholders(graph, name_scopes)
      weight_placeholders = self.add_example_weight_placeholders(graph,
                                                                 name_scopes)
      if training:
        graph.queue = tf.FIFOQueue(
            capacity=5,
            dtypes=[tf.float32] *
            (len(label_placeholders) + len(weight_placeholders) + 1))
        graph.enqueue = graph.queue.enqueue([mol_features] + label_placeholders
                                            + weight_placeholders)
        queue_outputs = graph.queue.dequeue()
        labels = queue_outputs[1:len(label_placeholders) + 1]
        weights = queue_outputs[len(label_placeholders) + 1:]
        prev_layer = queue_outputs[0]
      else:
        labels = label_placeholders
        weights = weight_placeholders
        prev_layer = mol_features

      prev_layer_size = n_features
      for i in range(n_layers):
        layer = tf.nn.relu(
            model_ops.fully_connected_layer(
                tensor=prev_layer,
                size=layer_sizes[i],
                weight_init=tf.truncated_normal(
                    shape=[prev_layer_size, layer_sizes[i]],
                    stddev=weight_init_stddevs[i]),
                bias_init=tf.constant(
                    value=bias_init_consts[i], shape=[layer_sizes[i]])))
        layer = model_ops.dropout(layer, dropouts[i], training)
        prev_layer = layer
        prev_layer_size = layer_sizes[i]

      output = model_ops.multitask_logits(layer, self.n_tasks)
    return (output, labels, weights)
Developer: joegomes, Project: deepchem, Lines: 67, Source: fcnet.py

Example 10: build

# Required import: from deepchem.models.tensorflow_models import TensorflowGraph [as alias]
# Or: from deepchem.models.tensorflow_models.TensorflowGraph import get_placeholder_scope [as alias]
  def build(self, graph, name_scopes, training):
    """Constructs the graph architecture as specified in its config.

    This method creates the following Placeholders:
      mol_features: Molecule descriptor (e.g. fingerprint) tensor with shape
        batch_size x num_features.
    """
    num_features = self.n_features 
    placeholder_scope = TensorflowGraph.get_placeholder_scope(
        graph, name_scopes)
    with graph.as_default():
      with placeholder_scope:
        self.mol_features = tf.placeholder(
            tf.float32,
            shape=[None, num_features],
            name='mol_features')

      layer_sizes = self.layer_sizes
      weight_init_stddevs = self.weight_init_stddevs
      bias_init_consts = self.bias_init_consts
      dropouts = self.dropouts

      bypass_layer_sizes = self.bypass_layer_sizes
      bypass_weight_init_stddevs = self.bypass_weight_init_stddevs
      bypass_bias_init_consts = self.bypass_bias_init_consts
      bypass_dropouts = self.bypass_dropouts

      lengths_set = {
          len(layer_sizes),
          len(weight_init_stddevs),
          len(bias_init_consts),
          len(dropouts),
          }
      assert len(lengths_set) == 1, "All layer params must have same length."
      num_layers = lengths_set.pop()
      assert num_layers > 0, "Must have some layers defined."

      bypass_lengths_set = {
          len(bypass_layer_sizes),
          len(bypass_weight_init_stddevs),
          len(bypass_bias_init_consts),
          len(bypass_dropouts),
          }
      assert len(bypass_lengths_set) == 1, (
          "All bypass_layer params must have same length.")
      num_bypass_layers = bypass_lengths_set.pop()

      prev_layer = self.mol_features
      prev_layer_size = num_features 
      for i in range(num_layers):
        # layer has shape [None, layer_sizes[i]]
        ########################################################## DEBUG
        print("Adding weights of shape %s" % str([prev_layer_size, layer_sizes[i]]))
        ########################################################## DEBUG
        layer = tf.nn.relu(model_ops.fully_connected_layer(
            tensor=prev_layer,
            size=layer_sizes[i],
            weight_init=tf.truncated_normal(
                shape=[prev_layer_size, layer_sizes[i]],
                stddev=weight_init_stddevs[i]),
            bias_init=tf.constant(value=bias_init_consts[i],
                                  shape=[layer_sizes[i]])))
        layer = model_ops.dropout(layer, dropouts[i], training)
        prev_layer = layer
        prev_layer_size = layer_sizes[i]

      output = []
      # top_multitask_layer has shape [None, layer_sizes[-1]]
      top_multitask_layer = prev_layer
      for task in range(self.n_tasks):
        # TODO(rbharath): Might want to make it feasible to have multiple
        # bypass layers.
        # Construct task bypass layer
        prev_bypass_layer = self.mol_features
        prev_bypass_layer_size = num_features
        for i in range(num_bypass_layers):
          # bypass_layer has shape [None, bypass_layer_sizes[i]]
          ########################################################## DEBUG
          print("Adding bypass weights of shape %s"
                % str([prev_bypass_layer_size, bypass_layer_sizes[i]]))
          ########################################################## DEBUG
          bypass_layer = tf.nn.relu(model_ops.fully_connected_layer(
              tensor=prev_bypass_layer,
              size=bypass_layer_sizes[i],
              weight_init=tf.truncated_normal(
                  shape=[prev_bypass_layer_size, bypass_layer_sizes[i]],
                  stddev=bypass_weight_init_stddevs[i]),
              bias_init=tf.constant(value=bypass_bias_init_consts[i],
                                    shape=[bypass_layer_sizes[i]])))
          bypass_layer = model_ops.dropout(bypass_layer, bypass_dropouts[i])
          prev_bypass_layer = bypass_layer
          prev_bypass_layer_size = bypass_layer_sizes[i]
        top_bypass_layer = prev_bypass_layer

        if num_bypass_layers > 0:
          # task_layer has shape [None, layer_sizes[-1] + bypass_layer_sizes[-1]]
          task_layer = tf.concat(1, [top_multitask_layer, top_bypass_layer])
          task_layer_size = layer_sizes[-1] + bypass_layer_sizes[-1]
        else:
#......... (some code omitted here) .........
Developer: apappu97, Project: deepchem, Lines: 103, Source: robust_multitask.py


Note: The deepchem.models.tensorflow_models.TensorflowGraph.get_placeholder_scope examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding projects' licenses. Please do not repost without permission.