當前位置: 首頁>>代碼示例>>Python>>正文


Python layer.Layer方法代碼示例

本文整理匯總了Python中layer.Layer方法的典型用法代碼示例。如果您正苦於以下問題:Python layer.Layer方法的具體用法?Python layer.Layer怎麼用?Python layer.Layer使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊layer的用法示例。


在下文中一共展示了layer.Layer方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Python代碼示例。

示例1: convert_layer_to_tensor

# 需要導入模塊: import layer [as 別名]
# 或者: from layer import Layer [as 別名]
def convert_layer_to_tensor(layer, dtype=None, name=None, as_ref=False):
    """Tensor-conversion hook mapping a Layer/Model to its output tensor.

    Registered-converter protocol: return NotImplemented for any object
    that is not a Layer or Model so the framework can try other converters.
    The dtype/name/as_ref arguments are part of the hook signature and are
    intentionally ignored here.
    """
    if isinstance(layer, (Layer, Model)):
        return layer.output
    return NotImplemented
開發者ID:akosiorek,項目名稱:hart,代碼行數:6,代碼來源:__init__.py

示例2: __init__

# 需要導入模塊: import layer [as 別名]
# 或者: from layer import Layer [as 別名]
def __init__(self, layer_structure: List[int], learning_rate: float, activation_function: Callable[[float], float] = sigmoid, derivative_activation_function: Callable[[float], float] = derivative_sigmoid) -> None:
        """Build a fully connected network from a list of layer sizes.

        layer_structure gives the neuron count of each layer and must have
        at least three entries: one input, one or more hidden, one output.
        Each Layer after the first is wired to the layer built just before it.
        """
        if len(layer_structure) < 3:
            raise ValueError("Error: Should be at least 3 layers (1 input, 1 hidden, 1 output)")
        self.layers: List[Layer] = []
        # The input layer has no predecessor feeding into it.
        self.layers.append(Layer(None, layer_structure[0], learning_rate, activation_function, derivative_activation_function))
        # Chain every remaining layer onto the one most recently built;
        # enumerate over the tail so prev_index points at the predecessor.
        for prev_index, neuron_count in enumerate(layer_structure[1:]):
            self.layers.append(Layer(self.layers[prev_index], neuron_count, learning_rate, activation_function, derivative_activation_function))

    # Pushes input data to the first layer, then output from the first
    # as input to the second, second to the third, etc. 
開發者ID:davecom,項目名稱:ClassicComputerScienceProblemsInPython,代碼行數:16,代碼來源:network.py

示例3: __init__

# 需要導入模塊: import layer [as 別名]
# 或者: from layer import Layer [as 別名]
def __init__(self,FLAGS, env, agent_params):
        """Construct a hierarchical agent with FLAGS.layers levels."""

        self.FLAGS = FLAGS
        self.sess = tf.Session()

        # Fraction of the time each layer tests its proposed subgoals
        self.subgoal_test_perc = agent_params["subgoal_test_perc"]

        # One Layer object per hierarchy level requested by the user
        self.layers = [Layer(level, FLAGS, env, self.sess, agent_params)
                       for level in range(FLAGS.layers)]

        # Checkpointing handles; populated when networks are (re)stored
        self.saver = None
        self.model_dir = None
        self.model_loc = None

        # Build actor/critic networks (loads saved parameters unless retraining)
        self.initialize_networks()

        # Current goal for each layer of the agent, set during episodes
        self.goal_array = [None] * FLAGS.layers

        self.current_state = None

        # Number of primitive (lowest-level) actions executed so far
        self.steps_taken = 0

        # Q-value updates performed after each episode
        self.num_updates = 40

        # Accumulated performance results across evaluation runs
        self.performance_log = []

        self.other_params = agent_params


    # Determine whether or not each layer's goal was achieved.  Also, if applicable, return the highest level whose goal was achieved. 
開發者ID:andrew-j-levy,項目名稱:Hierarchical-Actor-Critc-HAC-,代碼行數:39,代碼來源:agent.py

示例4: __init__

# 需要導入模塊: import layer [as 別名]
# 或者: from layer import Layer [as 別名]
def __init__(self,FLAGS, env, agent_params):
        """Set up a multi-level HAC agent and its actor/critic networks."""

        self.FLAGS = FLAGS
        self.sess = tf.Session()

        # How often a layer subjects its subgoals to testing
        self.subgoal_test_perc = agent_params["subgoal_test_perc"]

        # Instantiate one Layer per level, all sharing this TF session
        self.layers = []
        for level in range(FLAGS.layers):
            self.layers.append(Layer(level, FLAGS, env, self.sess, agent_params))

        # Saving/restoring machinery; filled in later
        self.saver = None
        self.model_dir = None
        self.model_loc = None

        # Initialize actor/critic networks; loads saved parameters if not retraining
        self.initialize_networks()

        # Per-level goal slots, one entry per layer
        self.goal_array = [None] * FLAGS.layers

        self.current_state = None

        # Running count of low-level actions executed
        self.steps_taken = 0

        # Q-value updates to apply after each episode
        self.num_updates = 40

        # Performance results collected over time
        self.performance_log = []

        self.other_params = agent_params


    # Determine whether or not each layer's goal was achieved.  Also, if applicable, return the highest level whose goal was achieved. 
開發者ID:andrew-j-levy,項目名稱:Hierarchical-Actor-Critc-HAC-,代碼行數:39,代碼來源:agent.py

示例5: __init__

# 需要導入模塊: import layer [as 別名]
# 或者: from layer import Layer [as 別名]
def __init__(self, n_hidden, n_out, reg_exp_size, ae_size, id_to_reg_exp,
                 id_to_word, word_lookup_table, auto_encoder, L2_reg=0.0001):
        """Build the classifier graph: two skip layers, a hidden layer, an
        output layer, and a trainable lookup table for regex features.

        Per-variable adaptive learning rates and accumulated batch gradients
        are kept in the two OrderedDicts created below; the same dicts are
        passed into every Layer so all parameters register there.

        Args (semantics inferred from local usage -- confirm with callers):
            n_hidden: width of the hidden layer.
            n_out: number of output units.
            reg_exp_size: regex vocabulary size (rows of the lookup table).
            ae_size: input width for the autoencoder-side layers.
            id_to_reg_exp: id -> regex mapping; stored, not used here.
            id_to_word: id -> word mapping; stored, not used here.
            word_lookup_table: word embeddings; stored, not used here.
            auto_encoder: encoder object; stored, not used here.
            L2_reg: L2 regularisation coefficient; stored, not used here.
        """
       # sess = tf.Session()

        self.n_hidden = n_hidden
        self.n_out = n_out
        self.L2_reg = L2_reg
        self.activation = tf.tanh #modification 1
        self.auto_encoder = auto_encoder
        self.word_lookup_table = word_lookup_table
        self.id_to_word = id_to_word
        self.id_to_reg_exp = id_to_reg_exp
        # Fresh RNG with a random seed, used for weight initialisation below.
        rng = np.random.RandomState(random.randint(1, 2 ** 30))

        # Adaptive learning rates and batch-gradient accumulators, keyed by
        # the variable they belong to (shared with every Layer built below).
        self.learning_rate = OrderedDict({})
        self.batch_grad = OrderedDict({})

        # word dict size and ner dict size and reg_exp_dict size
        self.ae_size = ae_size
        self.reg_V = reg_exp_size

        # Input placeholders.  NOTE(review): shape says (batch, 20, 200) but
        # the original inline comment claims "memory size is 5" -- confirm
        # which is current.
        self.x_in=tf.placeholder(tf.float32, shape=(None, 20, 200))#memory size is 5
        self.reg_x=tf.placeholder(tf.int32, shape=(None,))
        self.y=tf.placeholder(tf.int32)
        self.i=0

        # Skip Layer for encoder
        # The detailed tensorflow structure is defined inside the Layer class

        self.skip_layer_ae = Layer(rng, ae_size, n_out, "tanh", self.learning_rate, self.batch_grad)
        # Skip Layer for the regex features
        self.skip_layer_re = Layer(rng, self.reg_V, n_out, "tanh", self.learning_rate, self.batch_grad)
        # Hidden Layer; per the original note, ae_size == n_hidden == 200
        self.hiddenLayer = Layer(rng, ae_size, n_hidden, "tanh", self.learning_rate, self.batch_grad)
        # Output Layer
        self.outputLayer = Layer(rng, n_hidden, n_out, "tanh", self.learning_rate, self.batch_grad)

        # Lookup table for reg
        # The string below is the retired Theano implementation, kept for
        # reference; the TensorFlow equivalent follows it.
        """
        reg_lookup_table_value = rng.uniform(low=-0.01, high=0.01, size=(self.reg_V, n_hidden))
        reg_lookup_table_value = np.asarray(reg_lookup_table_value, dtype=theano.config.floatX)
        self.reg_lookup_table = theano.shared(value=reg_lookup_table_value, name='rlt', borrow=True)
        self.learning_rate[self.reg_lookup_table] = theano.shared(value=np.ones(reg_lookup_table_value.shape,
                                                                                dtype=theano.config.floatX),
                                                                  borrow=True)
        self.batch_grad[self.reg_lookup_table] = theano.shared(value=np.zeros(reg_lookup_table_value.shape,
                                                                              dtype=theano.config.floatX), borrow=True)
        """
        # Trainable regex-id embedding table, uniform in [-0.01, 0.01), with
        # per-element learning-rate and batch-gradient variables to match.
        reg_lookup_table_value = rng.uniform(low=-0.01, high=0.01, size=(self.reg_V, n_hidden))
        self.reg_lookup_table = tf.Variable(np.asarray(reg_lookup_table_value), dtype=tf.float64, name='rlt')
        self.learning_rate[self.reg_lookup_table]=tf.Variable(np.ones(reg_lookup_table_value.shape),dtype=tf.float64, name='learnrate')

        print (reg_lookup_table_value.shape)
        self.batch_grad[self.reg_lookup_table]=tf.Variable(np.zeros(reg_lookup_table_value.shape),dtype=tf.float64,name='batchgrad')
        # All trainable parameters exposed to the optimiser.
        self.params = self.hiddenLayer.params + self.outputLayer.params + self.skip_layer_ae.params + self.skip_layer_re.params + [
            self.reg_lookup_table]

        #sess.run(tf.initialize_all_variables()) 
開發者ID:georgeiswang,項目名稱:Query_Classfication_LSTM,代碼行數:61,代碼來源:dnn_lstm.py


注:本文中的layer.Layer方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。