当前位置: 首页>>代码示例>>Python>>正文


Python TensorGraph._get_tf方法代码示例

本文整理汇总了Python中deepchem.models.TensorGraph._get_tf方法的典型用法代码示例。如果您正苦于以下问题:Python TensorGraph._get_tf方法的具体用法?Python TensorGraph._get_tf怎么用?Python TensorGraph._get_tf使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在deepchem.models.TensorGraph的用法示例。


在下文中一共展示了TensorGraph._get_tf方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _build_graph

# 需要导入模块: from deepchem.models import TensorGraph [as 别名]
# 或者: from deepchem.models.TensorGraph import _get_tf [as 别名]
 def _build_graph(self, tf_graph, scope, model_dir):
   """Build a TensorGraph holding the policy network and the MCTS loss.

   Returns the graph together with handles to its input/output layers so
   the caller can feed states and read back action probabilities, values,
   and the search targets used by the loss.
   """
   shapes = self._env.state_shape
   dtypes = self._env.state_dtype
   # Normalize a single-array state into the list form handled below.
   if not self._state_is_list:
     shapes = [shapes]
     dtypes = [dtypes]
   features = [
       Feature(shape=[None] + list(shape), dtype=tf.as_dtype(dtype))
       for shape, dtype in zip(shapes, dtypes)
   ]
   policy_layers = self._policy.create_layers(features)
   action_prob = policy_layers['action_prob']
   value = policy_layers['value']
   # Labels for the Monte Carlo tree search targets.
   search_prob = Label(shape=(None, self._env.n_actions))
   search_value = Label(shape=(None,))
   loss = MCTSLoss(
       self.value_weight,
       in_layers=[action_prob, value, search_prob, search_value])
   graph = TensorGraph(
       batch_size=self.max_search_depth,
       use_queue=False,
       graph=tf_graph,
       model_dir=model_dir)
   for feature in features:
     graph._add_layer(feature)
   graph.add_output(action_prob)
   graph.add_output(value)
   graph.set_loss(loss)
   graph.set_optimizer(self._optimizer)
   # Construct the TF ops inside the caller-supplied graph and scope.
   with graph._get_tf("Graph").as_default():
     with tf.variable_scope(scope):
       graph.build()
   if len(graph.rnn_initial_states) > 0:
     raise ValueError('MCTS does not support policies with recurrent layers')
   return graph, features, action_prob, value, search_prob, search_value
开发者ID:AhlamMD,项目名称:deepchem,代码行数:37,代码来源:mcts.py

示例2: _build_graph

# 需要导入模块: from deepchem.models import TensorGraph [as 别名]
# 或者: from deepchem.models.TensorGraph import _get_tf [as 别名]
 def _build_graph(self, tf_graph, scope, model_dir):
   """Build a TensorGraph holding the policy network and the A3C loss.

   Chooses the discrete or continuous loss based on which layers the
   policy exposes, records the choice in ``self.continuous``, and returns
   the graph plus handles to its input/output layers.
   """
   shapes = self._env.state_shape
   dtypes = self._env.state_dtype
   # Normalize a single-array state into the list form handled below.
   if not self._state_is_list:
     shapes = [shapes]
     dtypes = [dtypes]
   features = [
       Feature(shape=[None] + list(shape), dtype=tf.as_dtype(dtype))
       for shape, dtype in zip(shapes, dtypes)
   ]
   policy_layers = self._policy.create_layers(features)
   value = policy_layers['value']
   rewards = Weights(shape=(None,))
   advantages = Weights(shape=(None,))
   graph = TensorGraph(
       batch_size=self.max_rollout_length,
       use_queue=False,
       graph=tf_graph,
       model_dir=model_dir)
   for feature in features:
     graph._add_layer(feature)
   if 'action_prob' in policy_layers:
     # Discrete action space: the policy emits one probability per action.
     self.continuous = False
     action_prob = policy_layers['action_prob']
     actions = Label(shape=(None, self._env.n_actions))
     loss = A3CLossDiscrete(
         self.value_weight,
         self.entropy_weight,
         in_layers=[rewards, actions, action_prob, value, advantages])
     graph.add_output(action_prob)
   else:
     # Continuous action space: the policy emits a Gaussian mean and std.
     self.continuous = True
     action_mean = policy_layers['action_mean']
     action_std = policy_layers['action_std']
     actions = Label(shape=[None] + list(self._env.action_shape))
     loss = A3CLossContinuous(
         self.value_weight,
         self.entropy_weight,
         in_layers=[
             rewards, actions, action_mean, action_std, value, advantages
         ])
     graph.add_output(action_mean)
     graph.add_output(action_std)
   graph.add_output(value)
   graph.set_loss(loss)
   graph.set_optimizer(self._optimizer)
   # Construct the TF ops inside the caller-supplied graph and scope.
   with graph._get_tf("Graph").as_default():
     with tf.variable_scope(scope):
       graph.build()
   if self.continuous:
     return graph, features, rewards, actions, action_mean, action_std, value, advantages
   else:
     return graph, features, rewards, actions, action_prob, value, advantages
开发者ID:ktaneishi,项目名称:deepchem,代码行数:55,代码来源:a3c.py


注:本文中的deepchem.models.TensorGraph._get_tf方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。