This article collects typical usage examples of the Python mlp.MLP attribute. If you are unsure what mlp.MLP does or how to use it in Python, the curated attribute examples below may help. You can also explore further usage examples of the module mlp in which the attribute is defined.
The following lists 13 code examples of the mlp.MLP attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: print_utility
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def print_utility(my_mlp, tot_rows, tot_cols, decimal=2, flip=True):
    '''Print on the terminal the utility matrix of a discrete state space
    having states defined by tuples: (0,0); (0,1); (0,2) ...
    @param my_mlp an MLP object having a single output
    @param tot_rows total number of rows
    @param tot_cols total number of columns
    @param decimal the precision of the printing (default: 2 decimal places)
    @param flip boolean which defines if a vertical flip is applied (default: True)
    '''
    utility_matrix = np.zeros((tot_rows, tot_cols))
    for row in range(tot_rows):
        for col in range(tot_cols):
            x = np.array([row, col], dtype=np.float32)
            utility_matrix[row, col] = my_mlp.forward(x)
    np.set_printoptions(precision=decimal)  # set numpy print precision
    if flip:
        print(np.flipud(utility_matrix))
    else:
        print(utility_matrix)
    np.set_printoptions(precision=8)  # reset to the default
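print_utility only requires an object exposing a single-output forward(); a minimal smoke test with a hypothetical stand-in (DummyMLP is not part of the mlp module):

import numpy as np

class DummyMLP:
    """Hypothetical stand-in exposing the single-output forward() expected above."""
    def forward(self, x):
        return float(np.tanh(x.sum()))  # any scalar utility estimate works here

print_utility(DummyMLP(), tot_rows=3, tot_cols=3)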
Example 2: __init__
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def __init__(self,
             input_shape,
             output_size,
             hidden_sizes=(32, 32),
             hidden_nonlinearity=tf.nn.relu,
             output_nonlinearity=tf.nn.tanh):
    self.input_shape = input_shape
    self.output_size = output_size
    self.locals = locals()
    with tf.variable_scope("policy"):
        self.mlp = MLP(input_shape=input_shape,
                       output_size=output_size,
                       hidden_sizes=hidden_sizes,
                       hidden_nonlinearity=hidden_nonlinearity,
                       output_nonlinearity=output_nonlinearity)
        self.x = self.mlp.get_input_layer()
        self.y = self.mlp.get_output_layer()
Example 3: main
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def main():
    env = init_env()
    my_mlp = MLP(tot_inputs=2, tot_hidden=2, tot_outputs=1, activation="tanh")
    learning_rate = 0.1
    gamma = 0.9
    tot_epoch = 10001
    print_epoch = 100
    for epoch in range(tot_epoch):
        # XOR-world episode
        observation = env.reset(exploring_starts=True)
        # The episode starts here
        for step in range(1000):
            action = np.random.randint(0, 4)
            new_observation, reward, done = env.step(action)  # move in the world and get the state and reward
            my_mlp, error = update(my_mlp, new_observation, reward, learning_rate, gamma, done)
            observation = new_observation
            if done: break
        if epoch % print_epoch == 0 and epoch != 0:
            print("")
            print("Epoch: " + str(epoch+1))
            print("Tot steps: " + str(step))
            print("Error: " + str(error))
            print_utility(my_mlp, tot_rows=5, tot_cols=5)
    print("Generating plot, please wait...")
    subplot(my_mlp, world_size=5, filename="xor_planes.png")
    print("Done!")
Example 4: __init__
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def __init__(self,
             input_shape,
             hidden_sizes=(32, 32),
             hidden_nonlinearity=tf.nn.tanh,
             learning_rate=3e-4,
             batch_size=1000):
    self.input_shape = input_shape
    self.hidden_sizes = hidden_sizes
    self.learning_rate = learning_rate
    self.batch_size = batch_size
    self.sess = None
    with tf.variable_scope("mlp_fitting"):
        self.mlp = MLP(input_shape=input_shape,
                       output_size=1,
                       hidden_sizes=hidden_sizes,
                       hidden_nonlinearity=hidden_nonlinearity,
                       output_nonlinearity=None,
                       name='value')
        self.x = self.mlp.get_input_layer()
        self.y = tf.reshape(self.mlp.get_output_layer(), shape=(-1,))
        self.params = self.mlp.get_params()
        self.z = tf.placeholder(dtype=tf.float32, shape=(None,), name='z')
        loss = tf.reduce_mean(tf.square(self.z - self.y))
        self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(loss, var_list=self.params)
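No fitting method is shown for this class; a minimal sketch of how self.train_op might be driven over mini-batches, assuming self.sess has been set to an initialised tf.Session elsewhere:

def fit(self, x, z):
    """Hypothetical mini-batch fitting loop (not part of the original listing)."""
    for start in range(0, x.shape[0], self.batch_size):
        end = start + self.batch_size
        self.sess.run(self.train_op,
                      feed_dict={self.x: x[start:end], self.z: z[start:end]})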
Example 5: __init__
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def __init__(self,
             input_shape,
             output_size,
             hidden_sizes=(32, 32),
             hidden_nonlinearity=tf.nn.tanh):
    self.input_shape = input_shape
    self.output_size = output_size
    self.hidden_sizes = hidden_sizes
    self.locals = locals()
    self.distribution = Categorical(output_size)
    self.params = []
    with tf.variable_scope("policy"):
        # Probability network (outputs a softmax over actions)
        self.prob_mlp = MLP(input_shape=input_shape,
                            output_size=output_size,
                            hidden_sizes=hidden_sizes,
                            hidden_nonlinearity=hidden_nonlinearity,
                            output_nonlinearity=tf.nn.softmax,
                            name='prob')
        self.x = self.prob_mlp.get_input_layer()
        self.prob = self.prob_mlp.get_output_layer()
        self.params += self.prob_mlp.get_params()
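Action selection is not shown in this listing; a hypothetical sampling helper, assuming a running TF1 session and a 1-D observation array:

import numpy as np

def get_action(self, sess, observation):
    """Hypothetical helper: evaluate the softmax output and sample one action."""
    probs = sess.run(self.prob, feed_dict={self.x: observation[None, :]})[0]
    return np.random.choice(self.output_size, p=probs)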
Example 6: __init__
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def __init__(self,
             mlp_hidden_dim,
             num_mlp_layers,
             num_classes,
             embeddings,
             gpu=False):
    super(DanClassifier, self).__init__()
    self.to_cuda = to_cuda(gpu)
    self.embeddings = embeddings
    self.word_dim = len(embeddings[0])
    self.mlp = MLP(self.word_dim,
                   mlp_hidden_dim,
                   num_mlp_layers,
                   num_classes)
    print("# params:", sum(p.nelement() for p in self.parameters()))
Example 7: forward
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def forward(self, batch, debug=0, dropout=None):
    """ Average all word vectors in the doc, and feed into an MLP """
    docs_vectors = [
        torch.index_select(batch.embeddings_matrix, 1, doc)
        for doc in batch.docs
    ]
    # don't need to mask docs because padding vector is 0, won't change sum
    word_vector_sum = torch.sum(torch.stack(docs_vectors), dim=2)
    word_vector_avg = \
        torch.div(word_vector_sum.t(),
                  torch.autograd.Variable(batch.doc_lens.float())).t()
    return self.mlp.forward(word_vector_avg)
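A toy check of the masking argument in the comment above: a zero padding vector leaves the sum unchanged, so dividing by the true document length still yields the correct average (the values below are illustrative):

import torch

emb = torch.tensor([[1.0, 3.0, 0.0]])      # 1-dim embeddings; index 2 is the zero pad
doc = torch.tensor([0, 1, 2, 2])           # two real tokens plus two pads
vectors = torch.index_select(emb, 1, doc)  # shape (1, 4)
avg = vectors.sum(dim=1) / 2               # divide by the true doc length
print(avg)                                 # tensor([2.]) == mean of 1.0 and 3.0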
Example 8: __init__
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def __init__(self,
             input_dim,
             hidden_dim,
             num_layers,
             output_dim,
             window_size,
             gpu=False):
    super(Cnn, self).__init__()
    self.input_dim = input_dim
    self.hidden_dim = hidden_dim
    self.num_layers = num_layers
    self.output_dim = output_dim
    self.window_size = window_size
    self.gpu = gpu
    if num_layers > 1:
        self.cnn = Conv1d(input_dim,
                          hidden_dim,
                          window_size)
        self.mlp = MLP(hidden_dim,
                       hidden_dim,
                       num_layers - 1,
                       output_dim)
    else:
        self.cnn = Conv1d(input_dim,
                          output_dim,
                          window_size)
        self.mlp = None
Example 9: cnn_arg_parser
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def cnn_arg_parser():
    """ CLI args related to the CNN module """
    p = ArgumentParser(add_help=False)
    # we're running out of letters!
    p.add_argument("-c", "--cnn_hidden_dim", help="CNN hidden dimension", type=int, default=200)
    p.add_argument("-x", "--num_cnn_layers", help="Number of CNN layers", type=int, default=2)
    p.add_argument("-z", "--window_size", help="Size of the CNN window", type=int, default=3)
    p.add_argument("-o", "--pooling", help="Type of pooling to use [max, sum, avg]", type=str, default="max")
    return p
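Because the parser is built with add_help=False, it is meant to be composed into a full CLI via the standard argparse parents mechanism; a hypothetical composition:

from argparse import ArgumentParser

parser = ArgumentParser(parents=[cnn_arg_parser()])
args = parser.parse_args(["-c", "300", "-z", "5"])
print(args.cnn_hidden_dim, args.window_size)  # 300 5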
Example 10: __init__
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def __init__(self, opt):
    super(ABC, self).__init__()
    self.vid_flag = "imagenet" in opt.input_streams
    self.sub_flag = "sub" in opt.input_streams
    self.vcpt_flag = "vcpt" in opt.input_streams
    hidden_size_1 = opt.hsz1
    hidden_size_2 = opt.hsz2
    n_layers_cls = opt.n_layers_cls
    vid_feat_size = opt.vid_feat_size
    embedding_size = opt.embedding_size
    vocab_size = opt.vocab_size
    self.embedding = nn.Embedding(vocab_size, embedding_size)
    self.bidaf = BidafAttn(hidden_size_1 * 3, method="dot")  # no parameter for dot
    self.lstm_raw = RNNEncoder(300, hidden_size_1, bidirectional=True, dropout_p=0, n_layers=1, rnn_type="lstm")
    if self.vid_flag:
        print("activate video stream")
        self.video_fc = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(vid_feat_size, embedding_size),
            nn.Tanh(),
        )
        self.lstm_mature_vid = RNNEncoder(hidden_size_1 * 2 * 5, hidden_size_2, bidirectional=True,
                                          dropout_p=0, n_layers=1, rnn_type="lstm")
        self.classifier_vid = MLP(hidden_size_2 * 2, 1, 500, n_layers_cls)
    if self.sub_flag:
        print("activate sub stream")
        self.lstm_mature_sub = RNNEncoder(hidden_size_1 * 2 * 5, hidden_size_2, bidirectional=True,
                                          dropout_p=0, n_layers=1, rnn_type="lstm")
        self.classifier_sub = MLP(hidden_size_2 * 2, 1, 500, n_layers_cls)
    if self.vcpt_flag:
        print("activate vcpt stream")
        self.lstm_mature_vcpt = RNNEncoder(hidden_size_1 * 2 * 5, hidden_size_2, bidirectional=True,
                                           dropout_p=0, n_layers=1, rnn_type="lstm")
        self.classifier_vcpt = MLP(hidden_size_2 * 2, 1, 500, n_layers_cls)
Example 11: declare_parameters
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def declare_parameters(self):
    opts = self.opts
    with tf.variable_scope('params') as scope:
        self.L_init = tf.get_variable(name="L_init", initializer=tf.random_normal([1, self.opts.d]))
        self.C_init = tf.get_variable(name="C_init", initializer=tf.random_normal([1, self.opts.d]))
        self.LC_msg = MLP(opts, opts.d, repeat_end(opts.d, opts.n_msg_layers, opts.d), name="LC_msg")
        self.CL_msg = MLP(opts, opts.d, repeat_end(opts.d, opts.n_msg_layers, opts.d), name="CL_msg")
        self.L_update = tf.contrib.rnn.LayerNormBasicLSTMCell(self.opts.d, activation=decode_transfer_fn(opts.lstm_transfer_fn))
        self.C_update = tf.contrib.rnn.LayerNormBasicLSTMCell(self.opts.d, activation=decode_transfer_fn(opts.lstm_transfer_fn))
        self.L_vote = MLP(opts, opts.d, repeat_end(opts.d, opts.n_vote_layers, 1), name="L_vote")
        self.vote_bias = tf.get_variable(name="vote_bias", shape=[], initializer=tf.zeros_initializer())
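repeat_end() is used above but not defined in this listing. A helper consistent with these call sites (n copies of a hidden size followed by the output size) would be the following sketch; the actual implementation may differ:

def repeat_end(val, n, k):
    # e.g. repeat_end(d, 3, 1) -> [d, d, d, 1]: n hidden layer sizes plus the output size
    return [val] * n + [k]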
Example 12: run_experiment
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def run_experiment(x_train, y_train, x_valid, y_valid, embeddings, _layers):
    # Model parameters
    model_name = "mlp"
    layers = _layers
    # Training parameters
    learning_rate = 1e-3  # learning rate
    batch_size = 64  # batch size
    num_epochs = args.epochs  # no. of training epochs
    # Regularization parameters
    dropout_keep_prob = 0.5  # dropout keep probability
    l2_reg_lambda = 0.0  # L2 regularization lambda
    # Training
    # ==================================================
    with tf.Graph().as_default():
        tf.set_random_seed(42)  # set random seed for consistent initialization(s)
        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=False)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # Init model (`train` and `args` are globals defined elsewhere in the script)
            mlp = MLP(vocab_size=len(train.vocab),
                      num_classes=len(train.class_names),
                      layers=layers,
                      l2_reg_lambda=l2_reg_lambda)
            # Convert sparse matrices to arrays
            x_train = x_train.toarray()
            x_valid = x_valid.toarray()
            # Output directory for models and summaries
            timestamp = str(int(time.time()))
            out_dir = os.path.abspath(os.path.join(os.path.curdir, "runs", args.dataset, model_name,
                                                   timestamp))
            # Train and test model
            max_accuracy = train_and_test(sess, mlp, x_train, y_train, x_valid, y_valid, learning_rate,
                                          batch_size, num_epochs, dropout_keep_prob, out_dir)
    return timestamp, max_accuracy
Example 13: forward
# Required import: import mlp [as alias]
# Or: from mlp import MLP [as alias]
def forward(self, batch, debug=0, dropout=None):
    docs = batch.docs
    doc_lens = batch.doc_lens
    b = len(docs)
    max_doc_len = max(list(doc_lens))
    docs_vectors = \
        torch.stack(
            [
                torch.index_select(batch.embeddings_matrix, 1, doc)
                for doc in docs
            ],
            dim=0
        )
    # right pad, so docs are at least as long as self.window_size
    doc_lens = [max(self.window_size, l) for l in doc_lens]
    if max_doc_len < self.window_size:
        print("max doc length {} is smaller than window size {}".format(max_doc_len, self.window_size))
        # pad along the length dimension with zero vectors matching the embedding dim
        docs_vectors = \
            torch.cat(
                (docs_vectors,
                 torch.zeros(b, docs_vectors.size(1), self.window_size - max_doc_len)),
                dim=2
            )
        max_doc_len = self.window_size
    cnn_outs = self.cnn.forward(docs_vectors)  # size: (b, hidden_dim, max_doc_len - window_size + 1)
    num_windows_per_doc = cnn_outs.size()[2]
    assert num_windows_per_doc == max_doc_len - self.window_size + 1
    if dropout is not None:
        cnn_outs = dropout(cnn_outs)
    if self.num_layers <= 1:
        result = cnn_outs.permute(2, 0, 1)
    else:
        # reshape so all windows can be passed into the MLP
        cnn_outs = \
            cnn_outs.transpose(1, 2).contiguous().view(b * num_windows_per_doc, self.hidden_dim)
        # run the MLP on all windows
        cnn_outs = relu(cnn_outs)
        mlp_outs = self.mlp.forward(cnn_outs)
        # size: (max_doc_len - window_size + 1, b, hidden_dim)
        result = mlp_outs.view(b, num_windows_per_doc, self.hidden_dim).transpose(0, 1)
    lengths = [max(0, l - self.window_size + 1) for l in doc_lens]
    # pack to get rid of the parts that are past the end of the doc
    return pack_padded_sequence(
        result,
        lengths=lengths
    )
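A hypothetical consumer of the packed result, recovering a padded (num_windows, b, dim) tensor plus per-document window counts; `model` and `batch` are assumed stand-ins. Note that pack_padded_sequence expects lengths sorted in decreasing order unless enforce_sorted=False is passed (available from PyTorch 1.1):

from torch.nn.utils.rnn import pad_packed_sequence

packed = model.forward(batch)
padded, lens = pad_packed_sequence(packed)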