

Python layer.Layer Method Code Examples

This article collects typical usage examples of the layer.Layer method in Python. If you are wondering what layer.Layer does, how to call it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the layer module itself.


The following presents four code examples of the layer.Layer method, sorted by popularity by default.

Example 1: convert_layer_to_tensor

# Required import: import layer [as alias]
# Or: from layer import Layer [as alias]
def convert_layer_to_tensor(layer, dtype=None, name=None, as_ref=False):
    if not isinstance(layer, (Layer, Model)):
        return NotImplemented
    return layer.output 
Author: akosiorek, Project: hart, Lines of code: 6, Source file: __init__.py
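The (layer, dtype=None, name=None, as_ref=False) signature matches TensorFlow's tensor-conversion-function protocol, so the function is presumably registered with tf.register_tensor_conversion_function. A minimal usage sketch; the registration call below is an illustration, not necessarily how the hart project wires it up:

import tensorflow as tf
from layer import Layer

# Register the converter so TensorFlow ops accept a Layer where a tensor is
# expected; the conversion simply substitutes the layer's output tensor.
# (A similar call could register Model, which the isinstance check also covers.)
tf.register_tensor_conversion_function(Layer, convert_layer_to_tensor)

# After registration, e.g. tf.reduce_mean(some_layer) implicitly operates on
# some_layer.output.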

Example 2: __init__

# Required import: import layer [as alias]
# Or: from layer import Layer [as alias]
def __init__(self, layer_structure: List[int], learning_rate: float, activation_function: Callable[[float], float] = sigmoid, derivative_activation_function: Callable[[float], float] = derivative_sigmoid) -> None:
        if len(layer_structure) < 3:
            raise ValueError("Error: Should be at least 3 layers (1 input, 1 hidden, 1 output)")
        self.layers: List[Layer] = []
        # input layer
        input_layer: Layer = Layer(None, layer_structure[0], learning_rate, activation_function, derivative_activation_function)
        self.layers.append(input_layer)
        # hidden layers and output layer
        for previous, num_neurons in enumerate(layer_structure[1::]):
            next_layer = Layer(self.layers[previous], num_neurons, learning_rate, activation_function, derivative_activation_function)
            self.layers.append(next_layer)

    # Pushes input data to the first layer, then output from the first
    # as input to the second, second to the third, etc. 
Author: davecom, Project: ClassicComputerScienceProblemsInPython, Lines of code: 16, Source file: network.py
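A minimal construction sketch for this initializer, assuming the enclosing class in network.py is named Network; the layer sizes and learning rate below are illustrative:

# 4 input neurons, one hidden layer of 6 neurons, 3 output neurons; sigmoid and
# derivative_sigmoid are used for the activations by default.
network = Network(layer_structure=[4, 6, 3], learning_rate=0.3)

# Passing fewer than three sizes, e.g. Network([4, 3], 0.3), raises ValueError.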

Example 3: __init__

# Required import: import layer [as alias]
# Or: from layer import Layer [as alias]
def __init__(self,FLAGS, env, agent_params):

        self.FLAGS = FLAGS
        self.sess = tf.Session()

        # Set subgoal testing ratio each layer will use
        self.subgoal_test_perc = agent_params["subgoal_test_perc"]

        # Create agent with number of levels specified by user
        self.layers = [Layer(i,FLAGS,env,self.sess,agent_params) for i in range(FLAGS.layers)]

        # Below attributes will be used to help save network parameters
        self.saver = None
        self.model_dir = None
        self.model_loc = None

        # Initialize actor/critic networks.  Load saved parameters if not retraining
        self.initialize_networks()

        # goal_array will store goal for each layer of agent.
        self.goal_array = [None for i in range(FLAGS.layers)]

        self.current_state = None

        # Track number of low-level actions executed
        self.steps_taken = 0

        # Below hyperparameter specifies number of Q-value updates made after each episode
        self.num_updates = 40

        # Below parameters will be used to store performance results
        self.performance_log = []

        self.other_params = agent_params


    # Determine whether or not each layer's goal was achieved.  Also, if applicable, return the highest level whose goal was achieved. 
Author: andrew-j-levy, Project: Hierarchical-Actor-Critc-HAC-, Lines of code: 39, Source file: agent.py
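A hedged construction sketch; the class in agent.py is assumed to be named Agent, the real FLAGS object and environment come from the project's training script, and the agent_params value below is illustrative only:

# FLAGS.layers controls how many Layer instances (hierarchy levels) are built.
agent_params = {"subgoal_test_perc": 0.3}  # illustrative subgoal-testing ratio
agent = Agent(FLAGS, env, agent_params)

# agent.layers is then a list of FLAGS.layers Layer objects sharing one tf.Session.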

Example 4: __init__

# Required import: import layer [as alias]
# Or: from layer import Layer [as alias]
def __init__(self, n_hidden, n_out, reg_exp_size, ae_size, id_to_reg_exp,
                 id_to_word, word_lookup_table, auto_encoder, L2_reg=0.0001):
       # sess = tf.Session()

        self.n_hidden = n_hidden
        self.n_out = n_out
        self.L2_reg = L2_reg
        self.activation = tf.tanh #modification 1
        self.auto_encoder = auto_encoder
        self.word_lookup_table = word_lookup_table
        self.id_to_word = id_to_word
        self.id_to_reg_exp = id_to_reg_exp
        rng = np.random.RandomState(random.randint(1, 2 ** 30))

        # Adapting learning rate
        self.learning_rate = OrderedDict({})
        self.batch_grad = OrderedDict({})

        # word dict size and ner dict size and reg_exp_dict size
        self.ae_size = ae_size
        self.reg_V = reg_exp_size

        self.x_in = tf.placeholder(tf.float32, shape=(None, 20, 200))  # memory size is 5
        self.reg_x = tf.placeholder(tf.int32, shape=(None,))
        self.y = tf.placeholder(tf.int32)
        self.i = 0

        # Skip Layer for encoder
        # The detailed tensorflow structure is used in Layer method

        self.skip_layer_ae = Layer(rng, ae_size, n_out, "tanh", self.learning_rate, self.batch_grad)
        # Skip Layer for reg,
        self.skip_layer_re = Layer(rng, self.reg_V, n_out, "tanh", self.learning_rate, self.batch_grad)
        # Hidden Layer, ae_size=n_hidden=200
        self.hiddenLayer = Layer(rng, ae_size, n_hidden, "tanh", self.learning_rate, self.batch_grad)
        # Output Layer
        self.outputLayer = Layer(rng, n_hidden, n_out, "tanh", self.learning_rate, self.batch_grad)

        # Lookup table for reg
        """
        reg_lookup_table_value = rng.uniform(low=-0.01, high=0.01, size=(self.reg_V, n_hidden))
        reg_lookup_table_value = np.asarray(reg_lookup_table_value, dtype=theano.config.floatX)
        self.reg_lookup_table = theano.shared(value=reg_lookup_table_value, name='rlt', borrow=True)
        self.learning_rate[self.reg_lookup_table] = theano.shared(value=np.ones(reg_lookup_table_value.shape,
                                                                                dtype=theano.config.floatX),
                                                                  borrow=True)
        self.batch_grad[self.reg_lookup_table] = theano.shared(value=np.zeros(reg_lookup_table_value.shape,
                                                                              dtype=theano.config.floatX), borrow=True)
        """
        reg_lookup_table_value = rng.uniform(low=-0.01, high=0.01, size=(self.reg_V, n_hidden))
        self.reg_lookup_table = tf.Variable(np.asarray(reg_lookup_table_value), dtype=tf.float64, name='rlt')
        self.learning_rate[self.reg_lookup_table] = tf.Variable(np.ones(reg_lookup_table_value.shape), dtype=tf.float64, name='learnrate')

        print(reg_lookup_table_value.shape)
        self.batch_grad[self.reg_lookup_table] = tf.Variable(np.zeros(reg_lookup_table_value.shape), dtype=tf.float64, name='batchgrad')
        self.params = self.hiddenLayer.params + self.outputLayer.params + self.skip_layer_ae.params + self.skip_layer_re.params + [
            self.reg_lookup_table]

        #sess.run(tf.initialize_all_variables()) 
Author: georgeiswang, Project: Query_Classfication_LSTM, Lines of code: 61, Source file: dnn_lstm.py
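The four constructor calls above follow one pattern, which suggests a Layer signature along the lines of Layer(rng, n_in, n_out, activation_name, learning_rate_dict, batch_grad_dict); the sketch below is inferred from those calls, not taken from the project's documentation. Because session creation and variable initialization are commented out in the constructor, the caller presumably handles them:

# Inferred call pattern (argument roles are my labels, not the project's):
rng = np.random.RandomState(42)
extra_layer = Layer(rng, 200, 100, "tanh", OrderedDict({}), OrderedDict({}))

# Variables such as 'rlt', 'learnrate' and 'batchgrad' still need explicit
# initialization by the caller, e.g.:
sess = tf.Session()
sess.run(tf.global_variables_initializer())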


Note: The layer.Layer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; for redistribution and use, refer to the corresponding project's license. Do not reproduce without permission.