

Python init.Uniform Method Code Examples

This article collects typical usage examples of the init.Uniform method from the Python package lasagne.init. If you are wondering what init.Uniform does, how to use it, or what it looks like in real code, the selected examples below may help. You can also browse further usage examples from lasagne.init.


The following shows 6 code examples of the init.Uniform method, sorted by popularity by default.
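Before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the layer sizes and the range value are illustrative assumptions) showing the typical way init.Uniform is passed to a Lasagne layer as a weight initializer, and how an initializer can be sampled directly:

import numpy as np
from lasagne import init, layers, nonlinearities

# Uniform(range=0.1) draws values uniformly from [-0.1, 0.1].
l_in = layers.InputLayer(shape=(None, 100))
l_hidden = layers.DenseLayer(l_in, num_units=50,
                             W=init.Uniform(range=0.1),   # weight initializer
                             b=init.Constant(0.),         # bias initializer
                             nonlinearity=nonlinearities.rectify)

# An initializer can also be sampled directly into a numpy array.
W0 = init.Uniform(range=0.1).sample((100, 50))
assert W0.shape == (100, 50) and np.abs(W0).max() <= 0.1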

Example 1: __init__

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False, groups=1,
                 W=init.Uniform(), b=init.Constant(0.),
                 nonlinearity=nl.rectify, flip_filters=True,
                 convolution=T.nnet.conv2d, filter_dilation=(1, 1), **kwargs):
        assert num_filters % groups == 0
        self.groups = groups
        super(GroupConv2DLayer, self).__init__(incoming, num_filters, filter_size,
                                               stride=stride, pad=pad,
                                               untie_biases=untie_biases,
                                               W=W, b=b,
                                               nonlinearity=nonlinearity,
                                               flip_filters=flip_filters,
                                               convolution=convolution,
                                               filter_dilation=filter_dilation,
                                               **kwargs) 
Author: alexlee-gk, Project: visual_dynamics, Lines: 18, Source: layers_theano.py

Example 2: __init__

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def __init__(self):
        super(FlatSpec, self).__init__(testval=init.Uniform(1)) 
Author: ferrine, Project: gelato, Lines: 4, Source: dist.py

Example 3: test_group_conv

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def test_group_conv(x_shape, num_filters, groups, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = LT.GroupConv2DLayer(l_x, num_filters, filter_size=3, stride=1, pad='same',
                                 untie_biases=True, groups=groups, nonlinearity=None,
                                 W=init.Uniform(), b=init.Uniform())
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("conv time for x_shape=%r, num_filters=%r, groups=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, groups, batch_size))

    l_scan_conv = LT.ScanGroupConv2DLayer(l_x, num_filters, filter_size=3, stride=1, pad='same',
                                          untie_biases=True, groups=groups, nonlinearity=None,
                                          W=l_conv.W, b=l_conv.b)
    scan_conv_var = L.get_output(l_scan_conv)
    scan_conv_fn = theano.function([X_var], scan_conv_var)
    tic()
    scan_conv = scan_conv_fn(X)
    toc("scan_conv time for x_shape=%r, num_filters=%r, groups=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, groups, batch_size))

    assert np.allclose(conv, scan_conv) 
Author: alexlee-gk, Project: visual_dynamics, Lines: 28, Source: test_layers_theano.py

Example 4: test_bilinear_group_conv

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def test_bilinear_group_conv(x_shape, u_shape, batch_size=2):
    X_var = T.tensor4('X')
    U_var = T.matrix('U')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)
    U = np.random.random((batch_size,) + u_shape).astype(theano.config.floatX)

    l_xu_outer = LT.OuterProductLayer([l_x, l_u])
    l_x_diff_pred = LT.GroupConv2DLayer(l_xu_outer, x_shape[0], filter_size=5, stride=1, pad='same',
                                        untie_biases=True, groups=x_shape[0], nonlinearity=None,
                                        W=init.Uniform(), b=init.Uniform())
    X_diff_pred_var = L.get_output(l_x_diff_pred)
    X_diff_pred_fn = theano.function([X_var, U_var], X_diff_pred_var)
    X_diff_pred = X_diff_pred_fn(X, U)

    u_dim, = u_shape
    l_x_convs = []
    for i in range(u_dim + 1):
        l_x_conv = LT.GroupConv2DLayer(l_x, x_shape[0], filter_size=5, stride=1, pad='same',
                                       untie_biases=True, groups=x_shape[0], nonlinearity=None,
                                       W=l_x_diff_pred.W.get_value()[:, i:i+1],
                                       b=l_x_diff_pred.b.get_value() if i == u_dim else None)
        l_x_convs.append(l_x_conv)
    l_x_diff_pred_bw = LT.BatchwiseSumLayer(l_x_convs + [l_u])
    X_diff_pred_bw_var = L.get_output(l_x_diff_pred_bw)
    X_diff_pred_bw_fn = theano.function([X_var, U_var], X_diff_pred_bw_var)
    X_diff_pred_bw = X_diff_pred_bw_fn(X, U)

    assert np.allclose(X_diff_pred, X_diff_pred_bw, atol=1e-7) 
Author: alexlee-gk, Project: visual_dynamics, Lines: 32, Source: test_layers_theano.py

Example 5: build_action_cond_encoder_net

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def build_action_cond_encoder_net(input_shapes, **kwargs):
    x_shape, u_shape = input_shapes

    X_var = T.tensor4('X')
    U_var = T.matrix('U')
    X_diff_var = T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var

    l_x0 = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')

    l_x1 = L.Conv2DLayer(l_x0, 64, filter_size=6, stride=2, pad=0,
                         nonlinearity=nl.rectify,
                         name='x1')
    l_x2 = L.Conv2DLayer(l_x1, 64, filter_size=6, stride=2, pad=2,
                         nonlinearity=nl.rectify,
                         name='x2')
    l_x3 = L.Conv2DLayer(l_x2, 64, filter_size=6, stride=2, pad=2,
                         nonlinearity=nl.rectify,
                         name='x3')
    l_x3_shape = lasagne.layers.get_output_shape(l_x3)

    l_y4 = L.DenseLayer(l_x3, 1024, nonlinearity=nl.rectify, name='y')
    l_y4d = L.DenseLayer(l_y4, 2048, W=init.Uniform(1.0), nonlinearity=None)
    l_ud = L.DenseLayer(l_u, 2048, W=init.Uniform(0.1), nonlinearity=None)

    l_y4d_diff_pred = L.ElemwiseMergeLayer([l_y4d, l_ud], T.mul)
    l_y4_diff_pred = L.DenseLayer(l_y4d_diff_pred, 1024, W=init.Uniform(1.0), nonlinearity=None, name='y_diff_pred')

    l_y4_next_pred = L.ElemwiseMergeLayer([l_y4, l_y4_diff_pred], T.add, name='y_next_pred')

    l_y3_next_pred = L.DenseLayer(l_y4_next_pred, np.prod(l_x3_shape[1:]), nonlinearity=nl.rectify)
    l_x3_next_pred = L.ReshapeLayer(l_y3_next_pred, ([0],) + l_x3_shape[1:],
                                   name='x3_next_pred')

    l_x2_next_pred = LT.Deconv2DLayer(l_x3_next_pred, 64, filter_size=6, stride=2, pad=2,
                                   nonlinearity=nl.rectify,
                                   name='x2_next_pred')
    l_x1_next_pred = LT.Deconv2DLayer(l_x2_next_pred, 64, filter_size=6, stride=2, pad=2,
                                   nonlinearity=nl.rectify,
                                   name='x1_next_pred')
    l_x0_next_pred = LT.Deconv2DLayer(l_x1_next_pred, 3, filter_size=6, stride=2, pad=0,
                                   nonlinearity=None,
                                   name='x0_next_pred')

    loss_fn = lambda X, X_pred: ((X - X_pred) ** 2).mean(axis=0).sum() / 2.
    loss = loss_fn(X_next_var, lasagne.layers.get_output(l_x0_next_pred))

    net_name = 'ActionCondEncoderNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('x0_next_pred', l_x0_next_pred)])
    return net_name, input_vars, pred_layers, loss 
Author: alexlee-gk, Project: visual_dynamics, Lines: 54, Source: net_theano.py

Example 6: __init__

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def __init__(self, input_layer, num_filters, filter_size, strides=(1, 1), border_mode=None, untie_biases=False,
                 W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify, pad=None,
                 flip_filters=False):
        super(Conv2DDNNLayer, self).__init__(input_layer)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size
        if isinstance(strides, int):
            strides = (strides, strides)
        self.strides = strides
        self.untie_biases = untie_biases
        self.flip_filters = flip_filters

        if border_mode is not None and pad is not None:
            raise RuntimeError("You cannot specify both 'border_mode' and 'pad'. To avoid ambiguity, please specify only one of them.")
        elif border_mode is None and pad is None:
            # no option specified, default to valid mode
            self.pad = (0, 0)
            self.border_mode = 'valid'
        elif border_mode is not None:
            if border_mode == 'valid':
                self.pad = (0, 0)
                self.border_mode = 'valid'
            elif border_mode == 'full':
                self.pad = (self.filter_size[0] - 1, self.filter_size[1] - 1)
                self.border_mode = 'full'
            elif border_mode == 'same':
                # dnn_conv does not support same, so we just specify padding directly.
                # only works for odd filter size, but the even filter size case is probably not worth supporting.
                self.pad = ((self.filter_size[0] - 1) // 2, (self.filter_size[1] - 1) // 2)
                self.border_mode = None
            else:
                raise RuntimeError("Unsupported border_mode for Conv2DDNNLayer: %s" % border_mode)
        else:
            if isinstance(pad, int):
                pad = (pad, pad)
            self.pad = pad

        self.W = self.create_param(W, self.get_W_shape())
        if b is None:
            self.b = None
        elif self.untie_biases:
            output_shape = self.get_output_shape()
            self.b = self.create_param(b, (num_filters, output_shape[2], output_shape[3]))
        else:
            self.b = self.create_param(b, (num_filters,)) 
Author: benanne, Project: kaggle-ndsb, Lines: 52, Source: tmp_dnn.py


Note: The lasagne.init.Uniform examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors; please consult the corresponding project's license before distributing or using it. Do not republish without permission.