

Python links.LSTM Code Examples

This article collects typical usage examples of chainer.links.LSTM in Python. If you are wondering what links.LSTM does, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the chainer.links module it belongs to.


The sections below present 15 code examples of links.LSTM, drawn from open-source projects and ordered by popularity.
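Before the project excerpts, here is a minimal, self-contained sketch of the stateful chainer.links.LSTM interface (it is not taken from any of the projects below; the TinyRNN class, its layer sizes, and the toy input are invented for illustration). The link is constructed with input and output sizes, carries its hidden and cell state across calls, and is reset with reset_state() between sequences.

import numpy as np
import chainer
import chainer.links as L


class TinyRNN(chainer.Chain):
    """Toy recurrent model: one LSTM layer followed by a linear readout."""

    def __init__(self, n_in, n_hidden, n_out):
        super(TinyRNN, self).__init__()
        with self.init_scope():
            self.lstm = L.LSTM(n_in, n_hidden)   # keeps h and c internally
            self.out = L.Linear(n_hidden, n_out)

    def reset_state(self):
        # Clear the LSTM's hidden and cell state before a new sequence.
        self.lstm.reset_state()

    def __call__(self, x):
        h = self.lstm(x)  # one time step; state is carried to the next call
        return self.out(h)


model = TinyRNN(4, 8, 2)
model.reset_state()
for t in range(5):  # iterate over a toy sequence of length 5
    x_t = np.random.randn(1, 4).astype(np.float32)
    y_t = model(x_t)
print(y_t.shape)  # (1, 2)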

Example 1: make_model

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def make_model(self, env):
        n_dim_obs = env.observation_space.low.size
        n_dim_action = env.action_space.low.size
        n_hidden_channels = 50
        policy = Sequence(
            L.Linear(n_dim_obs, n_hidden_channels),
            F.relu,
            L.Linear(n_hidden_channels, n_hidden_channels),
            F.relu,
            L.LSTM(n_hidden_channels, n_hidden_channels),
            policies.FCGaussianPolicy(
                n_input_channels=n_hidden_channels,
                action_size=n_dim_action,
                min_action=env.action_space.low,
                max_action=env.action_space.high)
        )

        q_func = q_function.FCLSTMSAQFunction(
            n_dim_obs=n_dim_obs,
            n_dim_action=n_dim_action,
            n_hidden_layers=2,
            n_hidden_channels=n_hidden_channels)

        return chainer.Chain(policy=policy, q_function=q_func) 
Developer: chainer, Project: chainerrl, Lines: 26, Source file: basetest_pgt.py

Example 2: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, vocab, vocab_ngram_tokens, n_units, n_units_char, dropout,
                 subword):  # dropout ratio, zero indicates no dropout
        super(RNN, self).__init__()
        with self.init_scope():
            self.embed = L.EmbedID(
                len(vocab_ngram_tokens.lst_words) + 2, n_units_char,
                initialW=I.Uniform(1. / n_units_char))  # ngram tokens embedding  plus 2 for OOV and end symbol.
            if 'lstm' in subword:
                self.mid = L.LSTM(n_units_char, n_units_char * 2)
            self.out = L.Linear(n_units_char * 2, n_units_char)  # the feed-forward output layer
            if 'bilstm' in subword:
                self.mid_b = L.LSTM(n_units_char, n_units_char * 2)
                self.out_b = L.Linear(n_units_char * 2, n_units_char)

            self.n_ngram = vocab_ngram_tokens.metadata["max_gram"] - vocab_ngram_tokens.metadata["min_gram"] + 1
            self.final_out = L.Linear(n_units * (self.n_ngram), n_units)

            self.dropout = dropout
            self.vocab = vocab
            self.vocab_ngram_tokens = vocab_ngram_tokens
            self.subword = subword 
Developer: vecto-ai, Project: vecto, Lines: 23, Source file: subword.py

Example 3: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, vocab_size, hidden_size, dropout_ratio, ignore_label):
        super(LSTMLanguageModel, self).__init__()
        with self.init_scope():
            self.embed_word = L.EmbedID(
                vocab_size,
                hidden_size,
                initialW=initializers.Normal(1.0),
                ignore_label=ignore_label
            )
            self.embed_img = L.Linear(
                hidden_size,
                initialW=initializers.Normal(0.01)
            )
            self.lstm = L.LSTM(hidden_size, hidden_size)
            self.out_word = L.Linear(
                hidden_size,
                vocab_size,
                initialW=initializers.Normal(0.01)
            )

        self.dropout_ratio = dropout_ratio 
Developer: chainer, Project: chainer, Lines: 23, Source file: model.py

Example 4: step

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def step(self, hx, cx, xs):
        """Batch of word tokens to word tokens and hidden LSTM states.

        Predict the next set of tokens given previous tokens.
        """
        # Concatenate all input captions and pass them through the model in a
        # single pass
        caption_lens = [len(x) for x in xs]
        caption_sections = np.cumsum(caption_lens[:-1])
        xs = F.concat(xs, axis=0)

        xs = self.embed_word(xs)
        xs = F.split_axis(xs, caption_sections, axis=0)
        hx, cx, ys = self.lstm(hx, cx, xs)

        ys = F.concat(ys, axis=0)
        ys = F.dropout(ys, self.dropout_ratio)
        ys = self.decode_caption(ys)
        return hx, cx, ys 
Developer: chainer, Project: chainer, Lines: 21, Source file: model.py

Example 5: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, target_shape, num_labels, num_timesteps, uses_original_data=False, dropout_ratio=0.5, use_dropout=False):
        super(FSNSRecognitionNet, self).__init__()
        with self.init_scope():
            self.conv0 = L.Convolution2D(None, 32, 3, pad=1, stride=2)
            self.bn0 = L.BatchNormalization(32)
            self.conv1 = L.Convolution2D(32, 32, 3, pad=1)
            self.bn1 = L.BatchNormalization(32)
            self.rs1 = ResnetBlock(32, use_dropout=use_dropout, dropout_ratio=dropout_ratio)
            self.rs2 = ResnetBlock(64, filter_increase=True, use_dropout=use_dropout, dropout_ratio=dropout_ratio)
            self.rs3 = ResnetBlock(128, filter_increase=True, use_dropout=use_dropout, dropout_ratio=dropout_ratio)
            self.fc1 = L.Linear(None, 256)
            self.lstm = L.LSTM(None, 256)
            self.classifier = L.Linear(None, 134)

        self._train = True
        self.target_shape = target_shape
        self.num_labels = num_labels
        self.num_timesteps = num_timesteps
        self.uses_original_data = uses_original_data
        self.vis_anchor = None
        self.use_dropout = use_dropout
        self.dropout_ratio = dropout_ratio 
Developer: Bartzi, Project: see, Lines: 24, Source file: fsns.py

Example 6: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, target_shape, num_labels, num_timesteps, uses_original_data=False, dropout_ratio=0.5, use_dropout=False, use_blstm=False, use_attention=False):
        super().__init__()
        with self.init_scope():
            self.resnet = FSNSResNetLayers('', 152)
            self.fc1 = L.Linear(None, 512)
            self.lstm = L.LSTM(None, 512)
            if use_blstm:
                self.blstm = L.LSTM(None, 512)
            self.classifier = L.Linear(None, 134)

        self.target_shape = target_shape
        self.num_labels = num_labels
        self.num_timesteps = num_timesteps
        self.uses_original_data = uses_original_data
        self.vis_anchor = None
        self.use_dropout = use_dropout
        self.dropout_ratio = dropout_ratio
        self.use_blstm = use_blstm
        self.use_attention = use_attention 
Developer: Bartzi, Project: see, Lines: 21, Source file: fsns_resnet.py

Example 7: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, dropout_ratio, num_timesteps, zoom=0.9):
        super(SVHNLocalizationNet, self).__init__()
        with self.init_scope():
            self.conv0 = L.Convolution2D(None, 32, 3, pad=1)
            self.bn0 = L.BatchNormalization(32)
            self.rs1 = ResnetBlock(32)
            self.rs2 = ResnetBlock(48, filter_increase=True)
            self.rs3 = ResnetBlock(48)
            self.lstm = L.LSTM(None, 256)
            self.transform_2 = L.Linear(256, 6)

        # initialize transform
        self.transform_2.W.data[...] = 0

        transform_bias = self.transform_2.b.data
        transform_bias[[0, 4]] = zoom
        transform_bias[[2, 5]] = 0

        self.dropout_ratio = dropout_ratio
        self._train = True
        self.num_timesteps = num_timesteps
        self.vis_anchor = None

        self.width_encoding = None
        self.height_encoding = None 
Developer: Bartzi, Project: see, Lines: 27, Source file: svhn.py

Example 8: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, n_horses, n_jockeys):
		n_units_h, n_units_v, n_units_j, n_units_r, n_units_d = nn_nodes
		super(Turf_Tipster_NN, self).__init__()
		with self.init_scope():
			# Horse model state (kept as Parameters so it is saved with the model)
			self.vc = chainer.Parameter(np.zeros((n_horses, n_units_v), dtype=np.float32))
			self.vh = chainer.Parameter(np.zeros((n_horses, n_units_v), dtype=np.float32))
			# Horse model layers
			self.le = L.EmbedID(n_horses, n_horses)
			self.l1 = L.Linear(n_horses, n_units_h)
			self.l2 = L.LSTM(n_units_h, n_units_v)
			# Jockey model layers
			self.re = L.EmbedID(n_jockeys, n_jockeys)
			self.r1 = L.Linear(n_jockeys, n_units_j)
			# Race model layers
			self.m1 = L.EmbedID(11, 11)
			self.m2 = L.EmbedID(8, 8)
			self.m3 = L.EmbedID(4, 4)
			self.m4 = L.EmbedID(4, 4)
			self.j1 = L.Linear(n_units_v + n_units_j + 11 + 8 + 4 + 4, n_units_r)
			self.j2 = L.Linear(n_units_r, n_units_r)
			self.j3 = L.Linear(n_units_r, n_units_d)

	# The arguments are (race metadata, grid information), arranged in finishing order
Developer: cocon-ai-group, Project: turf-tipster, Lines: 26, Source file: rnn_network.py

Example 9: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, n_actions):
        self.head = links.NIPSDQNHead()
        self.pi = policy.FCSoftmaxPolicy(
            self.head.n_output_channels, n_actions)
        self.v = v_function.FCVFunction(self.head.n_output_channels)
        self.lstm = L.LSTM(self.head.n_output_channels,
                           self.head.n_output_channels)
        super().__init__(self.head, self.lstm, self.pi, self.v) 
Developer: chainer, Project: chainerrl, Lines: 10, Source file: train_a3c_ale.py

Example 10: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, obs_size, action_size, hidden_size=200, lstm_size=128):
        self.pi_head = L.Linear(obs_size, hidden_size)
        self.v_head = L.Linear(obs_size, hidden_size)
        self.pi_lstm = L.LSTM(hidden_size, lstm_size)
        self.v_lstm = L.LSTM(hidden_size, lstm_size)
        self.pi = policies.FCGaussianPolicy(lstm_size, action_size)
        self.v = v_function.FCVFunction(lstm_size)
        super().__init__(self.pi_head, self.v_head,
                         self.pi_lstm, self.v_lstm, self.pi, self.v) 
Developer: chainer, Project: chainerrl, Lines: 11, Source file: train_a3c_gym.py

Example 11: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
                 n_hidden_layers, nonlinearity=F.relu, last_wscale=1.):
        self.n_input_channels = n_dim_obs + n_dim_action
        self.n_hidden_layers = n_hidden_layers
        self.n_hidden_channels = n_hidden_channels
        self.nonlinearity = nonlinearity
        super().__init__()
        with self.init_scope():
            self.fc = MLP(self.n_input_channels, n_hidden_channels,
                          [self.n_hidden_channels] * self.n_hidden_layers,
                          nonlinearity=nonlinearity,
                          )
            self.lstm = L.LSTM(n_hidden_channels, n_hidden_channels)
            self.out = L.Linear(n_hidden_channels, 1,
                                initialW=LeCunNormal(last_wscale)) 
Developer: chainer, Project: chainerrl, Lines: 17, Source file: state_action_q_functions.py

Example 12: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, n_dim_obs, n_dim_action, n_hidden_channels,
                 n_hidden_layers):
        self.n_input_channels = n_dim_obs
        self.n_hidden_layers = n_hidden_layers
        self.n_hidden_channels = n_hidden_channels
        self.state_stack = []
        super().__init__()
        with self.init_scope():
            self.fc = MLP(in_size=self.n_input_channels,
                          out_size=n_hidden_channels,
                          hidden_sizes=[self.n_hidden_channels] *
                          self.n_hidden_layers)
            self.lstm = L.LSTM(n_hidden_channels, n_hidden_channels)
            self.out = L.Linear(n_hidden_channels, n_dim_action) 
Developer: chainer, Project: chainerrl, Lines: 16, Source file: state_q_functions.py

Example 13: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, vocab_size, embed_size, hidden_size, output_size):
        super(RNNModel, self).__init__()
        with self.init_scope():
            self.embed = L.EmbedID(vocab_size, embed_size)
            self.rnn = L.LSTM(embed_size, hidden_size)
            self.linear = L.Linear(hidden_size, output_size) 
Developer: Pinafore, Project: qb, Lines: 8, Source file: main.py

Example 14: parse_args

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batch_size', '-b', type=int, default=32,
                        help='Number of examples in each mini-batch')
    parser.add_argument('--bproplen', '-l', type=int, default=35,
                        help='Number of words in each mini-batch '
                             '(= length of truncated BPTT)')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=0,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--gradclip', '-c', type=float, default=5,
                        help='Gradient norm threshold to clip')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--test', action='store_true',
                        help='Use tiny datasets for quick tests')
    parser.set_defaults(test=False)
    parser.add_argument('--hidden_size', type=int, default=300,
                        help='Number of LSTM units in each layer')
    parser.add_argument('--embed_size', type=int, default=300,
                        help='Size of embeddings')
    parser.add_argument('--model', '-m', default='model.npz',
                        help='Model file name to serialize')
    parser.add_argument('--glove', default='data/glove.6B.300d.txt',
                        help='Path to glove embedding file.')
    args = parser.parse_args()
    return args 
Developer: Pinafore, Project: qb, Lines: 32, Source file: main.py

Example 15: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import LSTM [as alias]
def __init__(self, n_actions):
        self.head = dqn_head.NIPSDQNHead()
        self.pi = policy.FCSoftmaxPolicy(
            self.head.n_output_channels, n_actions)
        self.v = v_function.FCVFunction(self.head.n_output_channels)
        self.lstm = L.LSTM(self.head.n_output_channels,
                           self.head.n_output_channels)
        super().__init__(self.head, self.lstm, self.pi, self.v)
        init_like_torch(self) 
Developer: muupan, Project: async-rl, Lines: 11, Source file: a3c_ale.py


Note: The chainer.links.LSTM examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce this article without permission.