

Python layers.get_output_shape Method Code Examples

This article collects typical usage examples of the Python method lasagne.layers.get_output_shape. If you are wondering how to use layers.get_output_shape, what it is for, or what working examples of it look like, the curated code samples below may help. You can also explore further usage examples from its containing module, lasagne.layers.


The following presents 15 code examples of the layers.get_output_shape method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
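Before diving into the examples, here is a minimal usage sketch (not taken from any of the projects below, and assuming Lasagne with its Theano backend is installed): given a layer, or a list of layers, get_output_shape returns the symbolic output shape(s) without compiling or evaluating anything.

import lasagne.layers as L

l_in = L.InputLayer(shape=(None, 3, 32, 32))               # batch of 32x32 RGB images
l_conv = L.Conv2DLayer(l_in, num_filters=16, filter_size=3)
l_pool = L.MaxPool2DLayer(l_conv, pool_size=2)

print(L.get_output_shape(l_pool))                          # (None, 16, 15, 15)
# A list of layers yields a list of shapes:
print(L.get_output_shape([l_conv, l_pool]))                # [(None, 16, 30, 30), (None, 16, 15, 15)]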

Example 1: __init__

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def __init__(self, values, ref_img, sxy=60, sc=10, norm_type="sym",
                 name=None):

        C = ll.get_output_shape(ref_img)[1]
        if C not in [1, 3]:
            raise ValueError("Bilateral filtering requires a color or "
                             "greyscale reference image. Got %d channels." % C)

        if C == 1:
            kern_std = np.array([sxy, sxy, sc], np.float32)
        else:
            kern_std = np.array([sxy, sxy, sc, sc, sc], np.float32)

        super(BilateralFilterLayer, self).__init__(values, ref_img, kern_std,
                                                   norm_type, name=name,
                                                   _bilateral=True) 
Author: HapeMask, Project: crfrnn_layer, Lines: 18, Source: layers.py

Example 2: __init__

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def __init__(self, incoming, channel_layer_class, name=None, **channel_layer_kwargs):
        super(ChannelwiseLayer, self).__init__(incoming, name=name)
        self.channel_layer_class = channel_layer_class
        self.channel_incomings = []
        self.channel_outcomings = []
        for channel in range(lasagne.layers.get_output_shape(incoming)[0]):
            channel_incoming = L.SliceLayer(incoming, indices=slice(channel, channel+1), axis=1,
                                            name='%s.%s%d' % (name, 'slice', channel) if name is not None else None)
            channel_outcoming = channel_layer_class(channel_incoming,
                                                    name='%s.%s%d' % (name, 'op', channel) if name is not None else None,
                                                    **channel_layer_kwargs)
            self.channel_incomings.append(channel_incoming)
            self.channel_outcomings.append(channel_outcoming)
        self.outcoming = L.ConcatLayer(self.channel_outcomings, axis=1,
                                       name='%s.%s' % (name, 'concat') if name is not None else None) 
Author: alexlee-gk, Project: visual_dynamics, Lines: 17, Source: layers_theano.py
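For comparison, here is a minimal, self-contained sketch (hypothetical layer sizes, not from the project above) of the per-channel slicing pattern: get_output_shape determines how many SliceLayers to create. Note that for the usual NCHW layout the channel count sits at index 1 of the shape tuple.

import lasagne.layers as L

l_in = L.InputLayer(shape=(None, 4, 8, 8))                  # 4-channel input
n_channels = L.get_output_shape(l_in)[1]                    # -> 4
slices = [L.SliceLayer(l_in, indices=slice(c, c + 1), axis=1)
          for c in range(n_channels)]
print([L.get_output_shape(s) for s in slices])              # four times (None, 1, 8, 8)
merged = L.ConcatLayer(slices, axis=1)
print(L.get_output_shape(merged))                           # (None, 4, 8, 8)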

Example 3: build_convpool_conv1d

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def build_convpool_conv1d(input_vars, nb_classes, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with 1D-conv layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    convpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    convpool = Conv1DLayer(convpool, 64, 3)
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool 
Author: pbashivan, Project: EEGLearn, Lines: 36, Source: eeg_cnn_lib.py
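The ConcatLayer/ReshapeLayer/DimshuffleLayer step above is easy to get wrong, so here is a minimal sketch (hypothetical sizes, not from EEGLearn) of the same shape bookkeeping, with get_output_shape supplying the per-window feature count.

import lasagne.layers as L

n_timewin, features = 7, 128
flats = [L.InputLayer(shape=(None, features)) for _ in range(n_timewin)]
concat = L.ConcatLayer(flats)                               # concatenates along axis 1 -> (None, 7*128)
stacked = L.ReshapeLayer(concat, ([0], n_timewin, L.get_output_shape(flats[0])[1]))
print(L.get_output_shape(stacked))                          # (None, 7, 128)
seq = L.DimshuffleLayer(stacked, (0, 2, 1))
print(L.get_output_shape(seq))                              # (None, 128, 7), ready for Conv1DLayer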

Example 4: build_convpool_lstm

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def build_convpool_lstm(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM layer to integrate time from sequences of EEG images.

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    convpool = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
        nonlinearity=lasagne.nonlinearities.tanh)
    # We only need the final prediction, we isolate that quantity and feed it
    # to the next layer.
    convpool = SliceLayer(convpool, -1, 1)      # Selecting the last prediction
    # A fully-connected layer of 256 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=256, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(convpool, p=.5),
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool 
Author: pbashivan, Project: EEGLearn, Lines: 41, Source: eeg_cnn_lib.py

Example 5: classificationBranch

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def classificationBranch(net, kernel_size):

    # Post Convolution
    branch = l.batch_norm(l.Conv2DLayer(net,
                        num_filters=int(FILTERS[-1] * RESNET_K),
                        filter_size=kernel_size,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tPOST  CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))

    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Dense Convolution
    branch = l.batch_norm(l.Conv2DLayer(branch,
                        num_filters=int(FILTERS[-1] * RESNET_K * 2),
                        filter_size=1,
                        nonlinearity=nl.rectify))

    #log.p(("\t\tDENSE CONV SHAPE:", l.get_output_shape(branch), "LAYER:", len(l.get_all_layers(branch)) - 1))
    
    # Dropout Layer
    branch = l.DropoutLayer(branch)
    
    # Class Convolution
    branch = l.Conv2DLayer(branch,
                        num_filters=len(cfg.CLASSES),
                        filter_size=1,
                        nonlinearity=None)
    return branch 
Author: kahst, Project: BirdNET, Lines: 32, Source: model.py

Example 6: __init__

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def __init__(self, predictor, alpha=1.0, lambda_=0.0, w=1.0, use_constrained_opt=False, unweighted_features=False, algorithm_or_fname=None):
        if isinstance(predictor, str):
            with open(predictor) as predictor_file:
                predictor = from_yaml(predictor_file)
        self.predictor = predictor
        self.action_transformer = self.predictor.transformers['u']
        self.action_space = from_config(self.predictor.environment_config['action_space'])
        self.alpha = alpha
        lambda_ = np.asarray(lambda_)
        if np.isscalar(lambda_) or lambda_.ndim == 0:
            lambda_ = lambda_ * np.ones(self.action_space.shape)  # numpy fails with augmented assigment
        assert lambda_.shape == self.action_space.shape
        self._lambda_ = lambda_
        feature_names = iter_util.flatten_tree(self.predictor.feature_name)
        feature_shapes = L.get_output_shape([self.predictor.pred_layers[name] for name in feature_names])
        self.repeats = []
        for feature_shape in feature_shapes:
            self.repeats.extend([np.prod(feature_shape[2:])] * feature_shape[1])
        w = np.asarray(w)
        if np.isscalar(w) or w.ndim == 0 or len(w) == 1:
            w = w * np.ones(len(self.repeats))  # numpy fails with augmented assigment
        elif w.shape == (len(feature_names),):
            w = np.repeat(w, [feature_shape[1] for feature_shape in feature_shapes])
        assert w.shape == (len(self.repeats),)
        self._w = w
        self._theta = np.append(self._w, self._lambda_)
        self._w, self._lambda_ = np.split(self._theta, [len(self._w)])  # alias the parameters
        self.use_constrained_opt = use_constrained_opt
        self.unweighted_features = unweighted_features
        self.image_name = 'image'
        self.target_image_name = 'target_image'

        if algorithm_or_fname is not None:
            from visual_dynamics.algorithms import ServoingFittedQIterationAlgorithm
            if isinstance(algorithm_or_fname, str):
                with open(algorithm_or_fname) as algorithm_file:
                    algorithm_config = yaml.load(algorithm_file, Loader=Python2to3Loader)
                assert issubclass(algorithm_config['class'], ServoingFittedQIterationAlgorithm)
                mean_returns = algorithm_config['mean_returns']
                thetas = algorithm_config['thetas']
            else:
                algorithm = algorithm_or_fname
                assert isinstance(algorithm, ServoingFittedQIterationAlgorithm)
                mean_returns = algorithm.mean_returns
                thetas = algorithm.thetas
            print("using parameters based on best returns")
            best_return, best_theta = max(zip(mean_returns, thetas))
            print(best_return)
            print(best_theta)
            self.theta = best_theta 
Author: alexlee-gk, Project: visual_dynamics, Lines: 52, Source: servoing_policy.py
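A point worth isolating from the constructor above: get_output_shape accepts a list of layers and returns one shape tuple per layer, which is what makes the repeats bookkeeping a one-liner per feature map. A minimal sketch with made-up shapes:

import numpy as np
import lasagne.layers as L

feat_layers = [L.InputLayer(shape=(None, 2, 8, 8)),
               L.InputLayer(shape=(None, 3, 4, 4))]
feature_shapes = L.get_output_shape(feat_layers)            # [(None, 2, 8, 8), (None, 3, 4, 4)]
repeats = []
for shape in feature_shapes:
    repeats.extend([int(np.prod(shape[2:]))] * shape[1])    # one entry per channel
print(repeats)                                              # [64, 64, 16, 16, 16]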

Example 7: _get_pi2_var

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def _get_pi2_var(self):
        if not self.predictor.feature_jacobian_name:
            raise NotImplementedError

        X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

        names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
        vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
        feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

        y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
        y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
        y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                              y_target_var,
                                              alpha_var * y_target_var + (1 - alpha_var) * y_var)
                         for (y_var, y_target_var) in zip(y_vars, y_target_vars)]

        jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
        y_next_pred_vars = [T.flatten(next_feature_var, outdim=2) for next_feature_var in next_feature_vars]
        y_next_pred_vars = [theano.clone(y_next_pred_var, replace={U_var: U_lin_var}) for y_next_pred_var in y_next_pred_vars]

        z_vars = [y_target_var - y_next_pred_var + T.batched_tensordot(jac_var, U_lin_var, axes=(2, 1))
                  for (y_target_var, y_next_pred_var, jac_var) in zip(y_target_vars, y_next_pred_vars, jac_vars)]

        feature_shapes = L.get_output_shape([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(self.predictor.feature_name)])

        w_var, lambda_var = self.param_vars

        A_var = None
        b_var = None
        normalized_w_vars = T.split(w_var / self.repeats, [feature_shape[1] for feature_shape in feature_shapes], len(feature_shapes))
        for jac_var, z_var, normalized_w_var, feature_shape in zip(jac_vars, z_vars, normalized_w_vars, feature_shapes):
            z_var = T.flatten(z_var)
            jac_var = T.reshape(jac_var, (feature_shape[1], -1, self.action_space.shape[0]))
            jac_w_var = T.reshape(jac_var * normalized_w_var[:, None, None], (-1, self.action_space.shape[0]))
            jac_var = jac_var.reshape((-1, self.action_space.shape[0]))
            if A_var is None:
                A_var = jac_var.T.dot(jac_w_var)
            else:
                A_var += jac_var.T.dot(jac_w_var)
            if b_var is None:
                b_var = z_var.dot(jac_w_var)
            else:
                b_var += z_var.dot(jac_w_var)
        A_var += T.diag(lambda_var)
        pi_var = T.dot(T.nlinalg.matrix_inverse(A_var), b_var)  # preprocessed units
        return pi_var 
Author: alexlee-gk, Project: visual_dynamics, Lines: 49, Source: servoing_policy.py

Example 8: _get_A_b_c_split2_vars

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def _get_A_b_c_split2_vars(self):
        """
        Like _get_A_b_c_split2_vars() except that the first two dimensions of
        A_split_var, b_split_var, c_split_var are the batch size and the
        number of channels, instead of the other way around.
        """
        if not self.predictor.feature_jacobian_name:
            raise NotImplementedError

        X_var, U_var, X_target_var, U_lin_var, alpha_var = self.input_vars

        names = [self.predictor.feature_name, self.predictor.feature_jacobian_name, self.predictor.next_feature_name]
        vars_ = L.get_output([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(names)], deterministic=True)
        feature_vars, jac_vars, next_feature_vars = iter_util.unflatten_tree(names, vars_)

        y_vars = [T.flatten(feature_var, outdim=2) for feature_var in feature_vars]
        y_target_vars = [theano.clone(y_var, replace={X_var: X_target_var}) for y_var in y_vars]
        y_target_vars = [theano.ifelse.ifelse(T.eq(alpha_var, 1.0),
                                              y_target_var,
                                              alpha_var * y_target_var + (1 - alpha_var) * y_var)
                         for (y_var, y_target_var) in zip(y_vars, y_target_vars)]

        jac_vars = [theano.clone(jac_var, replace={U_var: U_lin_var}) for jac_var in jac_vars]
        y_next_pred_vars = [T.flatten(next_feature_var, outdim=2) for next_feature_var in next_feature_vars]
        y_next_pred_vars = [theano.clone(y_next_pred_var, replace={U_var: U_lin_var}) for y_next_pred_var in y_next_pred_vars]

        z_vars = [y_target_var - y_next_pred_var + T.batched_tensordot(jac_var, U_lin_var, axes=(2, 1))
                  for (y_target_var, y_next_pred_var, jac_var) in zip(y_target_vars, y_next_pred_vars, jac_vars)]

        feature_shapes = L.get_output_shape([self.predictor.pred_layers[name] for name in iter_util.flatten_tree(self.predictor.feature_name)])

        u_dim, = self.action_space.shape
        A_split_vars = []
        b_split_vars = []
        c_split_vars = []
        for jac_var, z_var, feature_shape in zip(jac_vars, z_vars, feature_shapes):
            z_var = z_var.reshape((-1, np.prod(feature_shape[2:])))
            jac_var = jac_var.reshape((-1, np.prod(feature_shape[2:]), u_dim))
            A_split_var = T.batched_tensordot(jac_var, jac_var, axes=(1, 1))
            b_split_var = T.batched_tensordot(jac_var, z_var, axes=(1, 1))
            c_split_var = T.batched_tensordot(z_var, z_var, axes=(1, 1))
            A_split_vars.append(A_split_var.reshape((-1, feature_shape[1], u_dim, u_dim)))
            b_split_vars.append(b_split_var.reshape((-1, feature_shape[1], u_dim)))
            c_split_vars.append(c_split_var.reshape((-1, feature_shape[1])))
        A_split_var = T.concatenate(A_split_vars, axis=1)
        b_split_var = T.concatenate(b_split_vars, axis=1)
        c_split_var = T.concatenate(c_split_vars, axis=1)
        return A_split_var, b_split_var, c_split_var 
Author: alexlee-gk, Project: visual_dynamics, Lines: 50, Source: servoing_policy.py

Example 9: build_action_cond_encoder_net

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def build_action_cond_encoder_net(input_shapes, **kwargs):
    x_shape, u_shape = input_shapes

    X_var = T.tensor4('X')
    U_var = T.matrix('U')
    X_diff_var = T.tensor4('X_diff')
    X_next_var = X_var + X_diff_var

    l_x0 = L.InputLayer(shape=(None,) + x_shape, input_var=X_var, name='x')
    l_u = L.InputLayer(shape=(None,) + u_shape, input_var=U_var, name='u')

    l_x1 = L.Conv2DLayer(l_x0, 64, filter_size=6, stride=2, pad=0,
                         nonlinearity=nl.rectify,
                         name='x1')
    l_x2 = L.Conv2DLayer(l_x1, 64, filter_size=6, stride=2, pad=2,
                         nonlinearity=nl.rectify,
                         name='x2')
    l_x3 = L.Conv2DLayer(l_x2, 64, filter_size=6, stride=2, pad=2,
                         nonlinearity=nl.rectify,
                         name='x3')
    l_x3_shape = lasagne.layers.get_output_shape(l_x3)

    l_y4 = L.DenseLayer(l_x3, 1024, nonlinearity=nl.rectify, name='y')
    l_y4d = L.DenseLayer(l_y4, 2048, W=init.Uniform(1.0), nonlinearity=None)
    l_ud = L.DenseLayer(l_u, 2048, W=init.Uniform(0.1), nonlinearity=None)

    l_y4d_diff_pred = L.ElemwiseMergeLayer([l_y4d, l_ud], T.mul)
    l_y4_diff_pred = L.DenseLayer(l_y4d_diff_pred, 1024, W=init.Uniform(1.0), nonlinearity=None, name='y_diff_pred')

    l_y4_next_pred = L.ElemwiseMergeLayer([l_y4, l_y4_diff_pred], T.add, name='y_next_pred')

    l_y3_next_pred = L.DenseLayer(l_y4_next_pred, np.prod(l_x3_shape[1:]), nonlinearity=nl.rectify)
    l_x3_next_pred = L.ReshapeLayer(l_y3_next_pred, ([0],) + l_x3_shape[1:],
                                   name='x3_next_pred')

    l_x2_next_pred = LT.Deconv2DLayer(l_x3_next_pred, 64, filter_size=6, stride=2, pad=2,
                                   nonlinearity=nl.rectify,
                                   name='x2_next_pred')
    l_x1_next_pred = LT.Deconv2DLayer(l_x2_next_pred, 64, filter_size=6, stride=2, pad=2,
                                   nonlinearity=nl.rectify,
                                   name='x1_next_pred')
    l_x0_next_pred = LT.Deconv2DLayer(l_x1_next_pred, 3, filter_size=6, stride=2, pad=0,
                                   nonlinearity=None,
                                   name='x0_next_pred')

    loss_fn = lambda X, X_pred: ((X - X_pred) ** 2).mean(axis=0).sum() / 2.
    loss = loss_fn(X_next_var, lasagne.layers.get_output(l_x0_next_pred))

    net_name = 'ActionCondEncoderNet'
    input_vars = OrderedDict([(var.name, var) for var in [X_var, U_var, X_diff_var]])
    pred_layers = OrderedDict([('x0_next_pred', l_x0_next_pred)])
    return net_name, input_vars, pred_layers, loss 
Author: alexlee-gk, Project: visual_dynamics, Lines: 54, Source: net_theano.py
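The decoder half above hinges on one detail: l_x3_shape from get_output_shape is used both to size the dense layer that maps back up (np.prod(l_x3_shape[1:]) units) and to reshape its output into a feature map. A minimal sketch of that round trip, with made-up sizes:

import numpy as np
import lasagne.layers as L

l_in = L.InputLayer(shape=(None, 3, 32, 32))
l_conv = L.Conv2DLayer(l_in, num_filters=8, filter_size=3, stride=2, pad=1)
conv_shape = L.get_output_shape(l_conv)                     # (None, 8, 16, 16)
l_code = L.DenseLayer(l_conv, num_units=256)                # bottleneck
l_up = L.DenseLayer(l_code, num_units=int(np.prod(conv_shape[1:])))
l_map = L.ReshapeLayer(l_up, ([0],) + conv_shape[1:])
print(L.get_output_shape(l_map))                            # (None, 8, 16, 16)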

Example 10: build_convpool_mix

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def build_convpool_mix(input_vars, nb_classes, grad_clip=110, imsize=32, n_colors=3, n_timewin=7):
    """
    Builds the complete network with LSTM and 1D-conv layers combined

    :param input_vars: list of EEG images (one image per time window)
    :param nb_classes: number of classes
    :param grad_clip:  the gradient messages are clipped to the given value during
                        the backward pass.
    :param imsize: size of the input image (assumes a square input)
    :param n_colors: number of color channels in the image
    :param n_timewin: number of time windows in the snippet
    :return: a pointer to the output of last layer
    """
    convnets = []
    w_init = None
    # Build 7 parallel CNNs with shared weights
    for i in range(n_timewin):
        if i == 0:
            convnet, w_init = build_cnn(input_vars[i], imsize=imsize, n_colors=n_colors)
        else:
            convnet, _ = build_cnn(input_vars[i], w_init=w_init, imsize=imsize, n_colors=n_colors)
        convnets.append(FlattenLayer(convnet))
    # at this point convnets shape is [numTimeWin][n_samples, features]
    # we want the shape to be [n_samples, features, numTimeWin]
    convpool = ConcatLayer(convnets)
    convpool = ReshapeLayer(convpool, ([0], n_timewin, get_output_shape(convnets[0])[1]))
    reformConvpool = DimshuffleLayer(convpool, (0, 2, 1))
    # input to 1D convlayer should be in (batch_size, num_input_channels, input_length)
    conv_out = Conv1DLayer(reformConvpool, 64, 3)
    conv_out = FlattenLayer(conv_out)
    # Input to LSTM should have the shape as (batch size, SEQ_LENGTH, num_features)
    lstm = LSTMLayer(convpool, num_units=128, grad_clipping=grad_clip,
        nonlinearity=lasagne.nonlinearities.tanh)
    lstm_out = SliceLayer(lstm, -1, 1)
    # Merge 1D-Conv and LSTM outputs
    dense_input = ConcatLayer([conv_out, lstm_out])
    # A fully-connected layer of 512 units with 50% dropout on its inputs:
    convpool = DenseLayer(lasagne.layers.dropout(dense_input, p=.5),
            num_units=512, nonlinearity=lasagne.nonlinearities.rectify)
    # And, finally, the output layer:
    convpool = DenseLayer(convpool,
            num_units=nb_classes, nonlinearity=lasagne.nonlinearities.softmax)
    return convpool 
Author: pbashivan, Project: EEGLearn, Lines: 45, Source: eeg_cnn_lib.py

Example 11: buildModel

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def buildModel():

    print "BUILDING MODEL TYPE..."

    #default settings
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net 
Author: kahst, Project: AcousticEventDetection, Lines: 51, Source: AED_train.py
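The "FINAL POOL OUT SHAPE" print above is the typical debugging use of get_output_shape. Combined with get_all_layers, the same idea yields a full shape table for a network; a minimal sketch with a toy net (not from the project above):

import lasagne.layers as L

net = L.InputLayer((None, 1, 64, 64))
net = L.Conv2DLayer(net, num_filters=16, filter_size=3, pad='same')
net = L.MaxPool2DLayer(net, pool_size=2)
for i, layer in enumerate(L.get_all_layers(net)):
    print(i, layer.__class__.__name__, L.get_output_shape(layer))
# 0 InputLayer (None, 1, 64, 64)
# 1 Conv2DLayer (None, 16, 64, 64)
# 2 MaxPool2DLayer (None, 16, 32, 32)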

Example 12: build_resnet_model

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def build_resnet_model():

    log.i('BUILDING RESNET MODEL...')

    # Random Seed
    lasagne_random.set_rng(cfg.getRandomState())

    # Input layer for images
    net = l.InputLayer((None, cfg.IM_DIM, cfg.IM_SIZE[1], cfg.IM_SIZE[0]))

    # First Convolution
    net = l.Conv2DLayer(net,
                        num_filters=cfg.FILTERS[0],
                        filter_size=cfg.KERNEL_SIZES[0],
                        pad='same',
                        W=initialization(cfg.NONLINEARITY),
                        nonlinearity=None)
    
    log.i(("\tFIRST CONV OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Residual Stacks
    for i in range(0, len(cfg.FILTERS)):
        net = resblock(net, filters=cfg.FILTERS[i] * cfg.RESNET_K, kernel_size=cfg.KERNEL_SIZES[i], stride=2, num_groups=cfg.NUM_OF_GROUPS[i])
        for _ in range(1, cfg.RESNET_N):
            net = resblock(net, filters=cfg.FILTERS[i] * cfg.RESNET_K, kernel_size=cfg.KERNEL_SIZES[i], num_groups=cfg.NUM_OF_GROUPS[i], preactivated=False)
        log.i(("\tRES STACK", i + 1, "OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))
        
    # Post Activation
    net = batch_norm(net)
    net = l.NonlinearityLayer(net, nonlinearity=nonlinearity(cfg.NONLINEARITY))
        
    # Pooling
    net = l.GlobalPoolLayer(net)
    log.i(("\tFINAL POOLING SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net)) - 1))

    # Classification Layer    
    net = l.DenseLayer(net, len(cfg.CLASSES), nonlinearity=nonlinearity('identity'), W=initialization('identity'))
    net = l.NonlinearityLayer(net, nonlinearity=nonlinearity('softmax'))

    log.i(("\tFINAL NET OUT SHAPE:", l.get_output_shape(net), "LAYER:", len(l.get_all_layers(net))))
    log.i("...DONE!")

    # Model stats
    log.i(("MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"))
    log.i(("MODEL HAS", l.count_params(net), "PARAMS"))

    return net

################## RASPBERRY PI NET ##################### 
Author: kahst, Project: BirdCLEF-Baseline, Lines: 51, Source: lasagne_net.py

Example 13: build_pi_model

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def build_pi_model():

    log.i('BUILDING RASPBERRY PI MODEL...')

    # Random Seed
    lasagne_random.set_rng(cfg.getRandomState())

    # Input layer for images
    net = l.InputLayer((None, cfg.IM_DIM, cfg.IM_SIZE[1], cfg.IM_SIZE[0]))

    # Convolutinal layer groups
    for i in range(len(cfg.FILTERS)):
        
        # 3x3 Convolution + Stride
        net = batch_norm(l.Conv2DLayer(net,
                                       num_filters=cfg.FILTERS[i],
                                       filter_size=cfg.KERNEL_SIZES[i],
                                       num_groups=cfg.NUM_OF_GROUPS[i],
                                       pad='same',
                                       stride=2,
                                       W=initialization(cfg.NONLINEARITY),
                                       nonlinearity=nonlinearity(cfg.NONLINEARITY)))
        
        log.i(('\tGROUP', i + 1, 'OUT SHAPE:', l.get_output_shape(net)))
        
    # Fully connected layers + dropout layers
    net = l.DenseLayer(net, cfg.DENSE_UNITS, nonlinearity=nonlinearity(cfg.NONLINEARITY), W=initialization(cfg.NONLINEARITY))    
    net = l.DropoutLayer(net, p=cfg.DROPOUT)
    
    net = l.DenseLayer(net, cfg.DENSE_UNITS, nonlinearity=nonlinearity(cfg.NONLINEARITY), W=initialization(cfg.NONLINEARITY))        
    net = l.DropoutLayer(net, p=cfg.DROPOUT)
    
    # Classification Layer (Softmax)
    net = l.DenseLayer(net, len(cfg.CLASSES), nonlinearity=nonlinearity('softmax'), W=initialization('softmax'))
    
    log.i(("\tFINAL NET OUT SHAPE:", l.get_output_shape(net)))
    log.i("...DONE!")

    # Model stats
    log.i(("MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"))
    log.i(("MODEL HAS", l.count_params(net), "PARAMS"))

    return net

################## BUILDING THE MODEL ################### 
Author: kahst, Project: BirdCLEF-Baseline, Lines: 47, Source: lasagne_net.py

Example 14: buildModel

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def buildModel(mtype=1):

    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net 
Author: kahst, Project: BirdCLEF2017, Lines: 60, Source: birdCLEF_evaluate.py

Example 15: buildModel

# Required import: from lasagne import layers [as alias]
# Or: from lasagne.layers import get_output_shape [as alias]
def buildModel(mtype=1):

    print "BUILDING MODEL TYPE", mtype, "..."

    #default settings (Model 1)
    filters = 64
    first_stride = 2
    last_filter_multiplier = 16

    #specific model type settings (see working notes for details)
    if mtype == 2:
        first_stride = 1
    elif mtype == 3:
        filters = 32
        last_filter_multiplier = 8

    #input layer
    net = l.InputLayer((None, IM_DIM, IM_SIZE[1], IM_SIZE[0]))

    #conv layers
    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=7, pad='same', stride=first_stride, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    if mtype == 2:
        net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
        net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 2, filter_size=5, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 4, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * 8, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    net = l.batch_norm(l.Conv2DLayer(net, num_filters=filters * last_filter_multiplier, filter_size=3, pad='same', stride=1, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.MaxPool2DLayer(net, pool_size=2)

    print "\tFINAL POOL OUT SHAPE:", l.get_output_shape(net) 

    #dense layers
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  
    net = l.batch_norm(l.DenseLayer(net, 512, W=init.HeNormal(gain=INIT_GAIN), nonlinearity=NONLINEARITY))
    net = l.DropoutLayer(net, DROPOUT)  

    #Classification Layer
    if MULTI_LABEL:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.sigmoid, W=init.HeNormal(gain=1))
    else:
        net = l.DenseLayer(net, NUM_CLASSES, nonlinearity=nonlinearities.softmax, W=init.HeNormal(gain=1))

    print "...DONE!"

    #model stats
    print "MODEL HAS", (sum(hasattr(layer, 'W') for layer in l.get_all_layers(net))), "WEIGHTED LAYERS"
    print "MODEL HAS", l.count_params(net), "PARAMS"

    return net 
Author: kahst, Project: BirdCLEF2017, Lines: 62, Source: birdCLEF_train.py


Note: The lasagne.layers.get_output_shape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.