

Python links.NStepLSTM Usage Examples

This article compiles typical usage examples of chainer.links.NStepLSTM in Python. If you are wondering what links.NStepLSTM is for, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from its containing module, chainer.links.


The following presents 15 code examples of links.NStepLSTM, sorted by popularity by default.
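As a warm-up, here is a minimal usage sketch of the NStepLSTM link itself (not taken from any of the projects below); the layer sizes, sequence lengths, and variable names are illustrative assumptions:

import numpy as np
import chainer.links as L

# n_layers=1, in_size=3, out_size=4, dropout=0.0 (illustrative values)
rnn = L.NStepLSTM(1, 3, 4, 0.0)

# A mini-batch of variable-length sequences, each of shape (seq_len, in_size)
xs = [np.random.randn(length, 3).astype(np.float32) for length in (5, 3, 2)]

# Passing None for the initial hidden and cell states uses zero initialization
hy, cy, ys = rnn(None, None, xs)

# hy and cy have shape (n_layers, batch_size, out_size);
# ys is a list of per-sequence outputs of shape (seq_len, out_size)
print(hy.shape, cy.shape, [y.shape for y in ys])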

Example 1: is_recurrent_link

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def is_recurrent_link(layer):
    """Return True iff a given layer is recurrent and supported by ChainerRL.

    Args:
        layer (callable): Any callable object.

    Returns:
        bool: True iff a given layer is recurrent and supported by ChainerRL.
    """
    return isinstance(layer, (
        L.NStepLSTM,
        L.NStepGRU,
        L.NStepRNNReLU,
        L.NStepRNNTanh,
        StatelessRecurrent,
    )) 
Developer: chainer, Project: chainerrl, Lines of code: 18, Source: stateless_recurrent.py

Example 2: mask_recurrent_state_at

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def mask_recurrent_state_at(link, recurrent_state, indices):
    if recurrent_state is None:
        return None
    if isinstance(link, L.NStepLSTM):
        h, c = recurrent_state
        # shape: (n_layers, batch_size, out_size)
        assert h.ndim == 3
        assert c.ndim == 3
        mask = link.xp.ones_like(h.array)
        mask[:, indices] = 0
        c = c * mask
        h = h * mask
        return (h, c)
    if isinstance(link, (L.NStepGRU, L.NStepRNNReLU, L.NStepRNNTanh)):
        h = recurrent_state
        # shape: (n_layers, batch_size, out_size)
        assert h.ndim == 3
        mask = link.xp.ones_like(h.array)
        mask[:, indices] = 0
        h = h * mask
        return h
    if isinstance(link, StatelessRecurrent):
        return link.mask_recurrent_state_at(recurrent_state, indices)
    else:
        raise ValueError('{} is not a recurrent link'.format(link)) 
Developer: chainer, Project: chainerrl, Lines of code: 27, Source: stateless_recurrent.py

Example 3: get_recurrent_state_at

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def get_recurrent_state_at(link, recurrent_state, indices, unwrap_variable):
    if recurrent_state is None:
        return None
    if isinstance(link, L.NStepLSTM):
        h, c = recurrent_state
        if unwrap_variable:
            h = h.array
            c = c.array
        # shape: (n_layers, batch_size, out_size)
        assert h.ndim == 3
        assert c.ndim == 3
        return (h[:, indices], c[:, indices])
    if isinstance(link, (L.NStepGRU, L.NStepRNNReLU, L.NStepRNNTanh)):
        h = recurrent_state
        if unwrap_variable:
            h = h.array
        # shape: (n_layers, batch_size, out_size)
        assert h.ndim == 3
        return h[:, indices]
    if isinstance(link, StatelessRecurrent):
        return link.get_recurrent_state_at(
            recurrent_state, indices, unwrap_variable)
    else:
        raise ValueError('{} is not a recurrent link'.format(link)) 
Developer: chainer, Project: chainerrl, Lines of code: 26, Source: stateless_recurrent.py

Example 4: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def __init__(self, ch):
    super(Link_NStepLSTM, self).__init__(L.NStepLSTM(1, 1, 1, 0))

    hd = ch.children().__next__()
    if not(hd.w0 is None):
        self.n_in = hd.w0.shape[1]
    else:
        self.n_in = None

    self.out_size = ch.out_size
    self.n_layers = ch.n_layers
    self.dropout = ch.dropout

    self.ws = []
    self.bs = []
    for i in range(self.n_layers):
        ws = []
        bs = []
        for j in range(8):
            ws.append(helper.make_tensor_value_info(
                ('/%d/w%d' % (i, j)), TensorProto.FLOAT, ["TODO"]))
            bs.append(helper.make_tensor_value_info(
                ('/%d/b%d' % (i, j)), TensorProto.FLOAT, ["TODO"]))
        self.ws.append(ws)
        self.bs.append(bs) 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 27, Source: links.py

Example 5: collect_inits

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def collect_inits(lk, pathname):
    res = []
    for na, pa in lk.namedparams():
        if isinstance(pa.data, type(None)):
            continue
        if na.count('/') == 1:
            res.append((pathname + na, pa))

    if isinstance(lk, L.BatchNormalization):
        res.append((pathname + '/avg_mean', lk.avg_mean))
        # TODO(satos): as things stand, the node tests pass but the ResNet test is painful
        # lk.avg_var = np.ones(lk.avg_var.shape).astype(np.float32) * 4.0
        res.append((pathname + '/avg_var', lk.avg_var))

    elif isinstance(lk, L.NStepLSTM) or isinstance(lk, L.NStepBiLSTM):
        # Collect these parameters here first (instead of recursing below)
        for i, clk in enumerate(lk.children()):
            for param in clk.params():
                res.append((pathname + '/%d/%s' % (i, param.name), param))
        return res

    for clk in lk.children():
        res += collect_inits(clk, pathname + '/' + clk.name)
    return res 
Developer: pfnet-research, Project: chainer-compiler, Lines of code: 26, Source: initializer.py

Example 6: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def __init__(self, n_layers, in_size, out_size, embed_size, hidden_size, proj_size, dropout=0.5):
    """Initialize the decoder with structure parameters.

    Args:
        n_layers (int): Number of layers.
        in_size (int): Dimensionality of input vectors.
        out_size (int): Dimensionality of output vectors.
        embed_size (int): Dimensionality of word embeddings.
        hidden_size (int): Dimensionality of hidden vectors.
        proj_size (int): Dimensionality of the projection before softmax.
        dropout (float): Dropout ratio.
    """
    super(LSTMDecoder, self).__init__(
        embed=L.EmbedID(in_size, embed_size),
        lstm=L.NStepLSTM(n_layers, embed_size, hidden_size, dropout),
        proj=L.Linear(hidden_size, proj_size),
        out=L.Linear(proj_size, out_size)
    )
    self.dropout = dropout
    for param in self.params():
        param.data[...] = np.random.uniform(-0.1, 0.1, param.data.shape) 
Developer: dialogtekgeek, Project: DSTC6-End-to-End-Conversation-Modeling, Lines of code: 23, Source: lstm_decoder.py

Example 7: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def __init__(self, n_layers, n_vocab, n_units, dropout=0.1, wv=None):
    super(RNNEncoder, self).__init__()
    with self.init_scope():
        if wv is None:
            self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                                   initialW=embed_init)
        else:
            # TODO: this implementation was allowing for dynamic embeddings
            # think about how to support both continuous embeddings
            # and function pointers
            # self.embed = self.get_embed_from_wv
            self.embed = L.EmbedID(n_vocab, n_units, ignore_label=-1,
                                   initialW=wv)
        self.encoder = L.NStepLSTM(n_layers, n_units, n_units, dropout)
    self.n_layers = n_layers
    self.out_units = n_units
    self.dropout = dropout 
Developer: vecto-ai, Project: vecto, Lines of code: 19, Source: nets.py

Example 8: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def __init__(self, vocab_size, hidden_size, dropout_ratio, ignore_label):
    super(NStepLSTMLanguageModel, self).__init__()
    with self.init_scope():
        self.embed_word = L.EmbedID(
            vocab_size,
            hidden_size,
            initialW=initializers.Normal(1.0),
            ignore_label=ignore_label
        )
        self.embed_img = L.Linear(
            hidden_size,
            initialW=initializers.Normal(0.01)
        )
        self.lstm = L.NStepLSTM(1, hidden_size, hidden_size, dropout_ratio)
        self.decode_caption = L.Linear(
            hidden_size,
            vocab_size,
            initialW=initializers.Normal(0.01)
        )

    self.dropout_ratio = dropout_ratio 
Developer: chainer, Project: chainer, Lines of code: 23, Source: model.py

Example 9: setUp

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def setUp(self):
    shape = (self.n_layers, len(self.lengths), self.out_size)
    if self.hidden_none:
        self.h = self.c = numpy.zeros(shape, 'f')
    else:
        self.h = numpy.random.uniform(-1, 1, shape).astype('f')
        self.c = numpy.random.uniform(-1, 1, shape).astype('f')
    self.xs = [
        numpy.random.uniform(-1, 1, (l, self.in_size)).astype('f')
        for l in self.lengths]

    self.gh = numpy.random.uniform(-1, 1, shape).astype('f')
    self.gc = numpy.random.uniform(-1, 1, shape).astype('f')
    self.gys = [
        numpy.random.uniform(-1, 1, (l, self.out_size)).astype('f')
        for l in self.lengths]
    self.rnn = links.NStepLSTM(
        self.n_layers, self.in_size, self.out_size, self.dropout)

    for layer in self.rnn:
        for p in layer.params():
            p.array[...] = numpy.random.uniform(-1, 1, p.shape)
    self.rnn.cleargrads() 
Developer: chainer, Project: chainer, Lines of code: 25, Source: test_link_n_step_lstm.py

Example 10: check_multi_gpu_forward

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def check_multi_gpu_forward(self, train=True):
    # See chainer/chainer#6262
    # NStepLSTM with cudnn & dropout should work on a non-current device
    msg = None
    rnn = self.rnn.copy('copy')
    rnn.dropout = .5
    with cuda.get_device_from_id(1):
        if self.hidden_none:
            h = None
        else:
            h = cuda.to_gpu(self.h)
        c = cuda.to_gpu(self.c)
        xs = [cuda.to_gpu(x) for x in self.xs]
        with testing.assert_warns(DeprecationWarning):
            rnn = rnn.to_gpu()
    with cuda.get_device_from_id(0),\
            chainer.using_config('train', train),\
            chainer.using_config('use_cudnn', 'always'):
        try:
            rnn(h, c, xs)
        except Exception as e:
            msg = e
    assert msg is None 
Developer: chainer, Project: chainer, Lines of code: 25, Source: test_link_n_step_lstm.py

Example 11: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def __init__(self, indim, outdim, normfac, fl=400, fs=80, fftl=512, fbsize=400):
    self.indim = indim
    self.outdim = outdim
    self.fl = fl
    self.fs = fs
    self.fftl = fftl
    self.fbsize = fbsize
    self.normfac = {'input': {'mean': cuda.to_gpu(normfac['input']['mean']),
                              'std': cupy.fmax(cuda.to_gpu(normfac['input']['std']), 1.0E-6)},
                    'output': {'mean': cuda.to_gpu(normfac['output']['mean']),
                               'std': cupy.fmax(cuda.to_gpu(normfac['output']['std']), 1.0E-6)}}
    super(Model, self).__init__()
    with self.init_scope():
        self.lx1 = L.NStepBiLSTM(1, self.indim, self.indim//2, 0.0)
        self.lx2 = L.Convolution2D(1, self.indim, (5, self.indim), (1, 1), (2, 0))
        self.ly1 = L.NStepLSTM(3, self.fbsize+self.indim, 256, 0.0)
        self.ly2 = L.Linear(256, self.outdim) 
Developer: nii-yamagishilab, Project: TSNetVocoder, Lines of code: 19, Source: model.py

Example 12: make_distrib_recurrent_q_func

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def make_distrib_recurrent_q_func(env):
    n_atoms = 51
    v_max = 10
    v_min = -10
    return chainerrl.links.StatelessRecurrentSequential(
        L.NStepLSTM(1, env.observation_space.low.size, 20, 0),
        chainerrl.q_functions.DistributionalFCStateQFunctionWithDiscreteAction(  # NOQA
            20, env.action_space.n,
            n_atoms=n_atoms,
            v_min=v_min,
            v_max=v_max,
            n_hidden_channels=None,
            n_hidden_layers=0,
        ),
    ) 
Developer: chainer, Project: chainerrl, Lines of code: 17, Source: test_double_categorical_dqn.py

Example 13: _step_lstm

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def _step_lstm(lstm, x, state):
    assert isinstance(lstm, L.NStepLSTM)
    assert len(lstm.ws) == 1
    assert len(lstm.bs) == 1
    assert len(lstm.ws[0]) == 8
    assert len(lstm.bs[0]) == 8
    if state is None or state[0] is None:
        xp = lstm.xp
        h = xp.zeros((len(x), lstm.out_size), dtype=np.float32)
        c = xp.zeros((len(x), lstm.out_size), dtype=np.float32)
    else:
        h, c = state
    h, c = _lstm(x, h, c, lstm.ws[0], lstm.bs[0])
    return h, (h, c) 
Developer: chainer, Project: chainerrl, Lines of code: 16, Source: test_stateless_recurrent_sequential.py

Example 14: _test_three_recurrent_children

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def _test_three_recurrent_children(self, gpu):
    # Test if https://github.com/chainer/chainer/issues/6053 is addressed
    in_size = 2
    out_size = 6

    rseq = StatelessRecurrentSequential(
        L.NStepLSTM(1, in_size, 3, 0),
        L.NStepGRU(2, 3, 4, 0),
        L.NStepRNNTanh(5, 4, out_size, 0),
    )

    if gpu >= 0:
        chainer.cuda.get_device_from_id(gpu).use()
        rseq.to_gpu()
    xp = rseq.xp

    seqs_x = [
        xp.random.uniform(-1, 1, size=(4, in_size)).astype(np.float32),
        xp.random.uniform(-1, 1, size=(1, in_size)).astype(np.float32),
        xp.random.uniform(-1, 1, size=(3, in_size)).astype(np.float32),
    ]

    # Make and load a recurrent state to check if the order is correct.
    _, rs = rseq.n_step_forward(seqs_x, None, output_mode='concat')
    _, _ = rseq.n_step_forward(seqs_x, rs, output_mode='concat')

    _, rs = rseq.n_step_forward(seqs_x, None, output_mode='split')
    _, _ = rseq.n_step_forward(seqs_x, rs, output_mode='split') 
Developer: chainer, Project: chainerrl, Lines of code: 30, Source: test_stateless_recurrent_sequential.py

Example 15: __init__

# Required import: from chainer import links [as alias]
# Or: from chainer.links import NStepLSTM [as alias]
def __init__(self, n_layers, n_vocab, embed_size, hidden_size, dropout=0.1):
    super(RNNEncoder, self).__init__()
    with self.init_scope():
        self.embed = L.EmbedID(n_vocab, embed_size, ignore_label=-1,
                               initialW=embed_init)
        self.rnn = L.NStepLSTM(n_layers, embed_size, hidden_size, dropout)
    self.n_layers = n_layers
    self.output_size = hidden_size
    self.dropout = dropout 
Developer: Pinafore, Project: qb, Lines of code: 11, Source: nets.py


Note: The chainer.links.NStepLSTM examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not republish without permission.