

Python init.Uniform Method Code Examples

This article compiles typical usage examples of the Python method lasagne.init.Uniform. If you have been wondering what init.Uniform does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also explore further usage examples from the lasagne.init module, where this method is defined.


The sections below present 6 code examples of the init.Uniform method, sorted by popularity by default.
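Before diving into the examples, here is a minimal sketch of the initializer itself. The snippet is illustrative rather than taken from the projects below (the layer sizes and variable names are made up), and assumes Lasagne and its Theano backend are installed: init.Uniform can be constructed from a scalar range, an explicit (low, high) tuple, or a target standard deviation, and is usually passed as the W or b argument of a layer, which then calls its sample() method with the appropriate parameter shape.

# A minimal, illustrative sketch (not from the examples below); assumes Lasagne is installed.
import numpy as np
from lasagne import init, layers as L, nonlinearities as nl

# Uniform(r) samples from U(-r, r); Uniform((low, high)) samples from U(low, high);
# Uniform(std=s, mean=m) derives the range so samples have roughly that std and mean.
w_init = init.Uniform(0.1)
sample = w_init.sample((5, 3))  # plain numpy array in theano.config.floatX
assert sample.shape == (5, 3)
assert np.all(np.abs(sample) <= 0.1)

# More commonly the initializer is passed straight to a layer, which calls sample()
# with the correct weight shape -- this is how init.Uniform appears in the examples below.
l_in = L.InputLayer(shape=(None, 100))
l_dense = L.DenseLayer(l_in, num_units=50, W=init.Uniform(0.05), nonlinearity=nl.rectify)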

Example 1: __init__

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
                 pad=0, untie_biases=False, groups=1,
                 W=init.Uniform(), b=init.Constant(0.),
                 nonlinearity=nl.rectify, flip_filters=True,
                 convolution=T.nnet.conv2d, filter_dilation=(1, 1), **kwargs):
        assert num_filters % groups == 0
        self.groups = groups
        super(GroupConv2DLayer, self).__init__(incoming, num_filters, filter_size,
                                               stride=stride, pad=pad,
                                               untie_biases=untie_biases,
                                               W=W, b=b,
                                               nonlinearity=nonlinearity,
                                               flip_filters=flip_filters,
                                               convolution=convolution,
                                               filter_dilation=filter_dilation,
                                               **kwargs) 
Author: alexlee-gk, Project: visual_dynamics, Lines of code: 18, Source file: layers_theano.py

Example 2: __init__

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def __init__(self):
        super(FlatSpec, self).__init__(testval=init.Uniform(1)) 
Author: ferrine, Project: gelato, Lines of code: 4, Source file: dist.py

Example 3: test_group_conv

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def test_group_conv(x_shape, num_filters, groups, batch_size=2):
    X_var = T.tensor4('X')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)

    l_conv = LT.GroupConv2DLayer(l_x, num_filters, filter_size=3, stride=1, pad='same',
                                 untie_biases=True, groups=groups, nonlinearity=None,
                                 W=init.Uniform(), b=init.Uniform())
    conv_var = L.get_output(l_conv)
    conv_fn = theano.function([X_var], conv_var)
    tic()
    conv = conv_fn(X)
    toc("conv time for x_shape=%r, num_filters=%r, groups=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, groups, batch_size))

    l_scan_conv = LT.ScanGroupConv2DLayer(l_x, num_filters, filter_size=3, stride=1, pad='same',
                                          untie_biases=True, groups=groups, nonlinearity=None,
                                          W=l_conv.W, b=l_conv.b)
    scan_conv_var = L.get_output(l_scan_conv)
    scan_conv_fn = theano.function([X_var], scan_conv_var)
    tic()
    scan_conv = scan_conv_fn(X)
    toc("scan_conv time for x_shape=%r, num_filters=%r, groups=%r, batch_size=%r\n\t" %
        (x_shape, num_filters, groups, batch_size))

    assert np.allclose(conv, scan_conv) 
Author: alexlee-gk, Project: visual_dynamics, Lines of code: 28, Source file: test_layers_theano.py

Example 4: test_bilinear_group_conv

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def test_bilinear_group_conv(x_shape, u_shape, batch_size=2):
    X_var = T.tensor4('X')
    U_var = T.matrix('U')
    l_x = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')
    X = np.random.random((batch_size,) + x_shape).astype(theano.config.floatX)
    U = np.random.random((batch_size,) + u_shape).astype(theano.config.floatX)

    l_xu_outer = LT.OuterProductLayer([l_x, l_u])
    l_x_diff_pred = LT.GroupConv2DLayer(l_xu_outer, x_shape[0], filter_size=5, stride=1, pad='same',
                                        untie_biases=True, groups=x_shape[0], nonlinearity=None,
                                        W=init.Uniform(), b=init.Uniform())
    X_diff_pred_var = L.get_output(l_x_diff_pred)
    X_diff_pred_fn = theano.function([X_var, U_var], X_diff_pred_var)
    X_diff_pred = X_diff_pred_fn(X, U)

    u_dim, = u_shape
    l_x_convs = []
    for i in range(u_dim + 1):
        l_x_conv = LT.GroupConv2DLayer(l_x, x_shape[0], filter_size=5, stride=1, pad='same',
                                       untie_biases=True, groups=x_shape[0], nonlinearity=None,
                                       W=l_x_diff_pred.W.get_value()[:, i:i+1],
                                       b=l_x_diff_pred.b.get_value() if i == u_dim else None)
        l_x_convs.append(l_x_conv)
    l_x_diff_pred_bw = LT.BatchwiseSumLayer(l_x_convs + [l_u])
    X_diff_pred_bw_var = L.get_output(l_x_diff_pred_bw)
    X_diff_pred_bw_fn = theano.function([X_var, U_var], X_diff_pred_bw_var)
    X_diff_pred_bw = X_diff_pred_bw_fn(X, U)

    assert np.allclose(X_diff_pred, X_diff_pred_bw, atol=1e-7) 
Author: alexlee-gk, Project: visual_dynamics, Lines of code: 32, Source file: test_layers_theano.py

Example 5: build_action_cond_encoder_net

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def build_action_cond_encoder_net(input_shapes, **kwargs):
    x_shape, u_shape = input_shapes

    X_var = T.tensor4('X')
    U_var = T.matrix('U')
    X_diff_var = T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var

    l_x0 = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')

    l_x1 = L.Conv2DLayer(l_x0, 64, filter_size=6, stride=2, pad=0,
                         nonlinearity=nl.rectify,
                         name='x1')
    l_x2 = L.Conv2DLayer(l_x1, 64, filter_size=6, stride=2, pad=2,
                         nonlinearity=nl.rectify,
                         name='x2')
    l_x3 = L.Conv2DLayer(l_x2, 64, filter_size=6, stride=2, pad=2,
                         nonlinearity=nl.rectify,
                         name='x3')
    l_x3_shape = lasagne.layers.get_output_shape(l_x3)

    l_y4 = L.DenseLayer(l_x3, 1024, nonlinearity=nl.rectify, name='y')
    l_y4d = L.DenseLayer(l_y4, 2048, W=init.Uniform(1.0), nonlinearity=None)
    l_ud = L.DenseLayer(l_u, 2048, W=init.Uniform(0.1), nonlinearity=None)

    l_y4d_diff_pred = L.ElemwiseMergeLayer([l_y4d, l_ud], T.mul)
    l_y4_diff_pred = L.DenseLayer(l_y4d_diff_pred, 1024, W=init.Uniform(1.0), nonlinearity=None, name='y_diff_pred')

    l_y4_next_pred = L.ElemwiseMergeLayer([l_y4, l_y4_diff_pred], T.add, name='y_next_pred')

    l_y3_next_pred = L.DenseLayer(l_y4_next_pred, np.prod(l_x3_shape[1:]), nonlinearity=nl.rectify)
    l_x3_next_pred = L.ReshapeLayer(l_y3_next_pred, ([0],) + l_x3_shape[1:],
                                   name='x3_next_pred')

    l_x2_next_pred = LT.Deconv2DLayer(l_x3_next_pred, 64, filter_size=6, stride=2, pad=2,
                                   nonlinearity=nl.rectify,
                                   name='x2_next_pred')
    l_x1_next_pred = LT.Deconv2DLayer(l_x2_next_pred, 64, filter_size=6, stride=2, pad=2,
                                   nonlinearity=nl.rectify,
                                   name='x1_next_pred')
    l_x0_next_pred = LT.Deconv2DLayer(l_x1_next_pred, 3, filter_size=6, stride=2, pad=0,
                                   nonlinearity=None,
                                   name='x0_next_pred')

    loss_fn = lambda X, X_pred: ((X - X_pred) ** 2).mean(axis=0).sum() / 2.
    loss = loss_fn(X_next_var, lasagne.layers.get_output(l_x0_next_pred))

    net_name = 'ActionCondEncoderNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('x0_next_pred', l_x0_next_pred)])
    return net_name, input_vars, pred_layers, loss 
Author: alexlee-gk, Project: visual_dynamics, Lines of code: 54, Source file: net_theano.py

Example 6: __init__

# Required import: from lasagne import init [as alias]
# Or alternatively: from lasagne.init import Uniform [as alias]
def __init__(self, input_layer, num_filters, filter_size, strides=(1, 1), border_mode=None, untie_biases=False,
                 W=init.Uniform(), b=init.Constant(0.), nonlinearity=nonlinearities.rectify, pad=None,
                 flip_filters=False):
        super(Conv2DDNNLayer, self).__init__(input_layer)
        if nonlinearity is None:
            self.nonlinearity = nonlinearities.identity
        else:
            self.nonlinearity = nonlinearity

        self.num_filters = num_filters
        self.filter_size = filter_size
        if isinstance(strides, int):
            strides = (strides, strides)
        self.strides = strides
        self.untie_biases = untie_biases
        self.flip_filters = flip_filters

        if border_mode is not None and pad is not None:
            raise RuntimeError("You cannot specify both 'border_mode' and 'pad'. To avoid ambiguity, please specify only one of them.")
        elif border_mode is None and pad is None:
            # no option specified, default to valid mode
            self.pad = (0, 0)
            self.border_mode = 'valid'
        elif border_mode is not None:
            if border_mode == 'valid':
                self.pad = (0, 0)
                self.border_mode = 'valid'
            elif border_mode == 'full':
                self.pad = (self.filter_size[0] - 1, self.filter_size[1] - 1)
                self.border_mode = 'full'
            elif border_mode == 'same':
                # dnn_conv does not support same, so we just specify padding directly.
                # only works for odd filter size, but the even filter size case is probably not worth supporting.
                self.pad = ((self.filter_size[0] - 1) // 2, (self.filter_size[1] - 1) // 2)
                self.border_mode = None
            else:
                raise RuntimeError("Unsupported border_mode for Conv2DDNNLayer: %s" % border_mode)
        else:
            if isinstance(pad, int):
                pad = (pad, pad)
            self.pad = pad

        self.W = self.create_param(W, self.get_W_shape())
        if b is None:
            self.b = None
        elif self.untie_biases:
            output_shape = self.get_output_shape()
            self.b = self.create_param(b, (num_filters, output_shape[2], output_shape[3]))
        else:
            self.b = self.create_param(b, (num_filters,)) 
Author: benanne, Project: kaggle-ndsb, Lines of code: 52, Source file: tmp_dnn.py


Note: The lasagne.init.Uniform examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult the corresponding project's license before redistributing or reusing the code. Do not reproduce this article without permission.