

Python tensor.bmatrix Function Code Examples

This article collects typical usage examples of the theano.tensor.bmatrix function in Python. If you have been wondering what exactly bmatrix does, how to call it, or what real code that uses it looks like, the curated examples below should help.


The following presents 15 code examples of the bmatrix function, sorted by popularity by default.
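Before the project examples, here is a minimal sketch (not taken from any of the projects below) of what tensor.bmatrix actually returns: a symbolic two-dimensional variable of dtype int8, i.e. a "byte matrix". The variable name 'x' and the toy function are illustrative only.

import numpy
import theano
import theano.tensor as T

x = T.bmatrix('x')                  # symbolic 2-D matrix of dtype int8
assert x.dtype == 'int8' and x.ndim == 2

f = theano.function([x], x.sum())   # compile a trivial graph
print(f(numpy.ones((2, 3), dtype='int8')))  # prints 6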

Example 1: test_illegal_things

 def test_illegal_things(self):
     i0 = TT.iscalar()
     i1 = TT.lvector()
     i2 = TT.bmatrix()
     self.failUnlessRaises(TypeError, FAS, [i1, slice(None, i2, -1), i0])
     self.failUnlessRaises(TypeError, FAS, [i1, slice(None, None, i2), i0])
     self.failUnlessRaises(TypeError, FAS, [i1, slice(i2, None, -1), i0])
Developer ID: jaberg, Project: theano-advidx, Lines: 7, Source: test_advidx.py

Example 2: __init__

    def __init__(self, model, latent):
        """ Initialize the stochastic block model for the adjacency matrix
        """
        self.model = model
        self.latent = latent
        self.prms = model['network']['graph']
        self.N = model['N']

        # Get the number of latent types (R) and the latent type vector (Y)
        self.type_name = self.prms['types']
        self.R = self.latent[self.type_name].R
        self.Y = self.latent[self.type_name].Y

        # A RxR matrix of connection probabilities per pair of clusters
        self.B = T.dmatrix('B')

        # For indexing, we also need Y as a column vector and tiled matrix
        self.Yv = T.reshape(self.Y, [self.N, 1])
        self.Ym = T.tile(self.Yv, [1, self.N])
        self.pA = self.B[self.Ym, T.transpose(self.Ym)]

        # Hyperparameters governing B and alpha
        self.b0 = self.prms['b0']
        self.b1 = self.prms['b1']

        # Define complete adjacency matrix
        self.A = T.bmatrix('A')

        # Define log probability
        log_p_B = T.sum((self.b0 - 1) * T.log(self.B) + (self.b1 - 1) * T.log(1 - self.B))
        log_p_A = T.sum(self.A * T.log(self.pA) + (1 - self.A) * T.log(1 - self.pA))

        self.log_p = log_p_B + log_p_A
Developer ID: remtcs, Project: theano_pyglm, Lines: 33, Source: graph.py

Example 3: test_local_gpu_elemwise_0

def test_local_gpu_elemwise_0():
    """
    Test local_gpu_elemwise_0 when there is a dtype upcastable to float32
    """
    a = tensor.bmatrix()
    b = tensor.fmatrix()
    c = tensor.fmatrix()

    a_v = (numpy.random.rand(4, 5) * 10).astype("int8")
    b_v = (numpy.random.rand(4, 5) * 10).astype("float32")
    c_v = (numpy.random.rand(4, 5) * 10).astype("float32")

    # Due to the optimization order, this composite is created when all
    # the ops are on the GPU.
    f = theano.function([a, b, c], [a + b + c], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
    assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1
    f(a_v, b_v, c_v)

    # Now test with the composite already on the CPU before we move it
    # to the GPU
    a_s = theano.scalar.int8()
    b_s = theano.scalar.float32()
    c_s = theano.scalar.float32()
    out_s = theano.scalar.Composite([a_s, b_s, c_s], [a_s + b_s + c_s])
    out_op = tensor.Elemwise(out_s)
    f = theano.function([a, b, c], [out_op(a, b, c)], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    assert sum(isinstance(node.op, cuda.GpuElemwise) for node in topo) == 1
    assert sum(isinstance(node.op, tensor.Elemwise) for node in topo) == 1
    f(a_v, b_v, c_v)
Developer ID: OlafLee, Project: Theano, Lines: 32, Source: test_opt.py
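The test above relies on Theano's upcasting rules: adding an int8 matrix to float32 matrices yields a float32 result, which is what lets the whole expression fuse into a single GPU elemwise op. A minimal sketch of just the upcasting rule, independent of the GPU machinery:

a = tensor.bmatrix('a')   # int8
b = tensor.fmatrix('b')   # float32
assert (a + b).dtype == 'float32'   # int8 + float32 upcasts to float32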

Example 4: __init__

    def __init__(self, nodes_per_layer, act_funcs, err_func, backprop_func, backprop_params,
                 l_rate=.001, batch_size=100):
        """
        layer_shape - number of nodes per layer, including input and output layers
        act_funcs - list activation functions between the layers
        err_func - cost/error function
        backprop_func - backpropagation function
        l_rate - Learning rate
        """
        assert len(nodes_per_layer)-1 == len(act_funcs), \
            ("Invalid number of activation functions compared to the number of hidden layers",
             len(nodes_per_layer), len(act_funcs))
        super(FFNet, self).__init__('FFNet', l_rate, batch_size)
        logging.info('\tConstructing FFNet with nodes per layer: %s, learning rate: %s ', nodes_per_layer, l_rate)

        input_data = T.fmatrix('X')
        input_labels = T.bmatrix('Y')
        layers = [input_data]

        # Generate initial random weights between each layer
        weights = []
        for i in range(len(nodes_per_layer)-1):
            weights.append(init_rand_weights((nodes_per_layer[i], nodes_per_layer[i+1])))
            weights[i].name = 'w' + str(i)

        # logging.debug('\tWeight layers: %s', len(weights))
        #logging.info('\tNumber of parameters to train: %s',
        #             sum(param.get_value(borrow=True, return_internal_type=True).size for param in weights))
        # Construct the layers with the given activation functions and weights between them
        # logging.info('\tConstructing layers ...')

        for i in range(len(weights)):
            layers.append(self.model(layers[i], weights[i], act_funcs[i]))

        for i in range(1, len(layers)):
            layers[i].name = 'l' + str(i)

        output_layer = layers[-1]
        cost = err_func(output_layer, input_labels)
        updates = backprop_func(cost, weights, self.l_rate, **backprop_params)

        prediction = T.argmax(output_layer, axis=1)
        prediction_value = T.max(output_layer, axis=1)

        # logging.info('\tConstructing functions ...')
        self.trainer = theano.function(
            inputs=[input_data, input_labels],
            outputs=cost,
            updates=updates,
            name='Trainer',
            allow_input_downcast=True  # Allows float64 to be cast to float32, which is necessary in order to use the GPU
        )
        self.predictor = theano.function(
            inputs=[input_data],
            outputs={'char_as_int': prediction,
                     'char_probability': prediction_value,
                     'output_layer': output_layer},
            name='Predictor',
            allow_input_downcast=True
        )
Developer ID: tsoernes, Project: tdt4137, Lines: 60, Source: ffnet.py
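Because input_labels is declared as T.bmatrix('Y'), the trainer expects labels as an int8 matrix, typically one-hot encoded. A hypothetical helper (not part of the project above) for building such a matrix:

import numpy

def one_hot_int8(labels, n_classes):
    """Encode integer class labels as an int8 one-hot matrix."""
    out = numpy.zeros((len(labels), n_classes), dtype='int8')
    out[numpy.arange(len(labels)), labels] = 1
    return out

# one_hot_int8([0, 2, 1], 3) -> [[1,0,0], [0,0,1], [0,1,0]]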

Example 5: __init__

    def __init__(self, model):
        """ Initialize the stochastic block model for the adjacency matrix
        """
        self.model = model
        self.prms = model['network']['graph']
        self.N = model['N']

        # SBM has R latent clusters
        self.R = self.prms['R']
        # A RxR matrix of connection probabilities per pair of clusters
        self.B = T.dmatrix('B')
        # SBM has a latent block or cluster assignment for each node
        self.Y = T.lvector('Y')
        # For indexing, we also need Y as a column vector and tiled matrix
        self.Yv = T.reshape(self.Y, [self.N, 1])
        self.Ym = T.tile(self.Yv, [1, self.N])
        self.pA = self.B[self.Ym, T.transpose(self.Ym)]

        # A probability of each cluster
        self.alpha = T.dvector('alpha')

        # Hyperparameters governing B and alpha
        self.b0 = self.prms['b0']
        self.b1 = self.prms['b1']
        self.alpha0 = self.prms['alpha0']

        # Define complete adjacency matrix
        self.A = T.bmatrix('A')

        # Define log probability
        log_p_B = T.sum((self.b0 - 1) * T.log(self.B) + (self.b1 - 1) * T.log(1 - self.B))
        log_p_alpha = T.sum((self.alpha0 - 1) * T.log(self.alpha))
        log_p_A = T.sum(self.A * T.log(self.pA) + (1 - self.A) * T.log(1 - self.pA))

        self.log_p = log_p_B + log_p_alpha + log_p_A
Developer ID: mmyros, Project: pyglm, Lines: 35, Source: graph.py

Example 6: ndim_btensor

def ndim_btensor(ndim, name=None):
    if ndim == 2:
        return T.bmatrix(name)
    elif ndim == 3:
        return T.btensor3(name)
    elif ndim == 4:
        return T.btensor4(name)
    return T.imatrix(name)
Developer ID: chubbymaggie, Project: NL2code, Lines: 8, Source: theano_utils.py
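Note that the fallback branch returns T.imatrix(name), an int32 matrix, rather than an int8 type, so callers asking for an ndim other than 2, 3, or 4 get a different dtype. A brief usage sketch, assuming the function above is in scope:

mask2 = ndim_btensor(2, 'mask2')    # T.bmatrix, dtype int8, ndim 2
mask3 = ndim_btensor(3, 'mask3')    # T.btensor3, dtype int8, ndim 3
assert mask2.dtype == 'int8' and mask2.ndim == 2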

Example 7: test3_ndarray

    def test3_ndarray(self):

        i0 = TT.iscalar()
        i1 = TT.lvector()
        i2 = TT.bmatrix()
        
        f = FAS([i1, slice(None, i0, -1), i2])
        assert f.n_in == 4
        assert f.idx_tuple == (i1.type, slice(0, i0.type, -1), i2.type,)
        assert f.view_map == {}
Developer ID: jaberg, Project: theano-advidx, Lines: 10, Source: test_advidx.py

Example 8: test_any_grad

 def test_any_grad(self):
     x = tensor.bmatrix("x")
     x_all = x.any()
     gx = theano.grad(x_all, x)
     f = theano.function([x], gx)
     x_random = self.rng.binomial(n=1, p=0.5, size=(5, 7)).astype("int8")
     for x_val in (x_random, numpy.zeros_like(x_random), numpy.ones_like(x_random)):
         gx_val = f(x_val)
         assert gx_val.shape == x_val.shape
         assert numpy.all(gx_val == 0)
Developer ID: souravsingh, Project: Theano, Lines: 10, Source: test_elemwise.py

Example 9: make_node

 def make_node(self, state, time):
     """
     make node ...
     :param state:
     :param time:
     :return:
     """
     state = T.as_tensor_variable(state)
     time = T.as_tensor_variable(time)
     return theano.Apply(self, [state, time], [T.bmatrix()])
Developer ID: Ilya-Simkin, Project: MusicGuru-RNN-Composer, Lines: 10, Source: DeepLearningHandler.py

Example 10: use_target

 def use_target(self, target, dtype):
   if target in self.y: return
   if target == "null": return
   if target == 'sizes' and not 'sizes' in self.n_out: #TODO(voigtlaender): fix data please
     self.n_out['sizes'] = [2,1]
   if self.base_network:
     self.base_network.use_target(target=target, dtype=dtype)
     if not self.y is self.base_network.y:
       self.y[target] = self.base_network.y[target]
     if not self.j is self.base_network.j:
       self.j[target] = self.base_network.j[target]
     if target not in self.n_out:
       self.n_out[target] = self.base_network.n_out[target]
     return
   if target.endswith("[sparse:coo]"):
     tprefix = target[:target.index("[")]
     ndim = self.n_out[target][1]  # expected (without batch), e.g. 2 if like (time,feature)
      # For each coordinate axis. Also with batch-dim.
     for i in range(ndim):
       self.y["%s[sparse:coo:%i:%i]" % (tprefix, ndim, i)] = T.TensorType("int32", (False,) * 2)('y_%s[sparse:coo:%i:%i]' % (tprefix, ndim, i))
     # And the data itself. Also with batch-dim.
     self.y["%s[sparse:coo:%i:%i]" % (tprefix, ndim, ndim)] = \
       T.TensorType(dtype, (False,) * 2)("y_%s[%i]" % (tprefix, ndim))
     # self.j will be used to get the list of keys we need to get from the dataset.
     for i in range(ndim + 1):
       self.j.setdefault("%s[sparse:coo:%i:%i]" % (tprefix, ndim, i), T.bmatrix('j_%s[sparse:coo:%i:%i]' % (tprefix, ndim, i)))
     # self.y[target] will be given to the OutputLayer.
     self.y[target] = tuple(self.y["%s[sparse:coo:%i:%i]" % (tprefix, ndim, i)] for i in range(ndim + 1))
     self.j[target] = self.j["data"]  # Not sure if this is the best we can do...
     return
   assert target in self.n_out
   ndim = self.n_out[target][1] + 1  # one more because of batch-dim
   self.y[target] = T.TensorType(dtype, (False,) * ndim)('y_%s' % target)
   self.y[target].n_out = self.n_out[target][0]
   self.j.setdefault(target, T.bmatrix('j_%s' % target))
   if getattr(self.y[target].tag, "test_value", None) is None:
     if ndim == 2:
       self.y[target].tag.test_value = numpy.zeros((3,2), dtype='int32')
     elif ndim == 3:
       self.y[target].tag.test_value = numpy.random.rand(3,2,self.n_out[target][0]).astype('float32')
   if getattr(self.j[target].tag, "test_value", None) is None:
     self.j[target].tag.test_value = numpy.ones((3,2), dtype="int8")
Developer ID: rwth-i6, Project: returnn, Lines: 42, Source: Network.py

Example 11: __init__

    def __init__(self, nin, nout, nhid, numpy_rng, scale=1.0):
        self.nin = nin
        self.nout = nout
        self.nhid = nhid
        self.numpy_rng = numpy_rng
        self.scale = np.float32(scale)

        self.inputs = T.fmatrix('inputs')
        self.inputs.tag.test_value = numpy_rng.uniform(
            low=-1., high=1.,
            size=(16, 5 * self.nin)
        ).astype(np.float32)
        self.targets = T.fmatrix('targets')
        self.targets.tag.test_value = np.ones(
            (16, 5 * nout), dtype=np.float32)
        self.masks = T.bmatrix('masks')
        self.masks.tag.test_value = np.ones(
            (16, 5), dtype=np.int8)
        self.batchsize = self.inputs.shape[0]

        self.inputs_frames = self.inputs.reshape((
            self.batchsize, self.inputs.shape[1] / nin,
            nin)).dimshuffle(1, 0, 2)
        self.targets_frames = self.targets.reshape((
            self.batchsize, self.targets.shape[1] / nout,
            nout)).dimshuffle(1, 0, 2)
        self.masks_frames = self.masks.T

        self.h0 = theano.shared(value=np.ones(
            nhid, dtype=theano.config.floatX) * np.float32(.5), name='h0')
        self.win = theano.shared(value=self.numpy_rng.normal(
            loc=0, scale=0.001, size=(nin, nhid)
        ).astype(theano.config.floatX), name='win')
        self.wrnn = theano.shared(value=self.scale * np.eye(
            nhid, dtype=theano.config.floatX), name='wrnn')
        self.wout = theano.shared(value=self.numpy_rng.uniform(
            low=-0.01, high=0.01, size=(nhid, nout)
        ).astype(theano.config.floatX), name='wout')
        self.bout = theano.shared(value=np.zeros(
            nout, dtype=theano.config.floatX), name='bout')

        self.params = [self.win, self.wrnn, self.wout, self.bout]

        (self.hiddens, self.outputs), self.updates = theano.scan(
            fn=self.step, sequences=self.inputs_frames,
            outputs_info=[T.alloc(
                self.h0, self.batchsize, self.nhid), None])

        self._stepcosts = T.sum((self.targets_frames - self.outputs)**2, axis=2)
        self._cost = T.switch(self.masks_frames > 0, self._stepcosts, 0).mean()
        self._grads = T.grad(self._cost, self.params)

        self.getoutputs = theano.function(
            [self.inputs], self.outputs)
Developer ID: saebrahimi, Project: RATM, Lines: 54, Source: rnn.py
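The cost above uses a common masking pattern: masks is an int8 matrix with one entry per (sequence, step), transposed to frame-major order, and T.switch zeroes the per-step costs of padded positions. Note that .mean() still divides by the total number of entries, masked ones included; a sketch of the alternative that averages only over valid steps (names are illustrative):

costs = T.fmatrix('costs')   # (time, batch) per-step costs
masks = T.bmatrix('masks')   # (time, batch); 1 = valid, 0 = padding
masked = T.switch(masks > 0, costs, 0)
mean_over_valid = masked.sum() / T.maximum(masks.sum(), 1)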

Example 12: __init__

    def __init__(self, layers, err_func, backprop_func, backprop_params,
                 l_rate, batch_size=10):
        """
        :param layers:
        :param err_func: cost/error function
        :param backprop_func: backpropagation function
        :param backprop_params: parameters to pass to backprop function
        :param l_rate: learning rate
        :param batch_size: (mini-)batch size
        :return:
        """
        super(ConvNet, self).__init__("ConvNet", l_rate, batch_size)
        logging.info('\tConstructing ConvNet with %s layers. Learning rate: %s. Batch size: %s ',
                     len(layers), l_rate, batch_size)

        input_data = T.fmatrix('X')
        input_labels = T.bmatrix('Y')

        params = []  # Regular weights and bias weights; i.e. everything to be adjusted during training
        for layer in layers:
            for param in layer.params:
                params.append(param)
        logging.info('\tNumber of parameters to train: %s',
                     sum(param.get_value(borrow=True, return_internal_type=True).size for param in params))

        layers[0].activate(input_data, self.batch_size)
        for i in range(1, len(layers)):
            prev_layer = layers[i-1]
            current_layer = layers[i]
            current_layer.activate(prev_layer.output(), self.batch_size)

        output_layer = layers[-1].output_values
        cost = err_func(output_layer, input_labels)
        updates = backprop_func(cost, params, l_rate, **backprop_params)

        prediction = T.argmax(output_layer, axis=1)
        prediction_value = T.max(output_layer, axis=1)

        logging.debug('\tConstructing functions ...')
        self.trainer = theano.function(
            inputs=[input_data, input_labels],
            outputs=cost,
            updates=updates,
            name='Trainer',
            allow_input_downcast=True  # Allows float64 to be cast to float32, which is necessary in order to use the GPU
        )
        self.predictor = theano.function(
            inputs=[input_data],
            outputs={'char_as_int': prediction,
                     'char_probability': prediction_value,
                     'output_layer': output_layer},
            name='Predictor',
            allow_input_downcast=True
        )
Developer ID: tsoernes, Project: tdt4137, Lines: 54, Source: convnet.py

Example 13: __init__

    def __init__(self, nin, nout, nhid, numpy_rng, scale=1.0):
        self.nin = nin
        self.nout = nout
        self.nhid = nhid
        self.numpy_rng = numpy_rng
        self.theano_rng = RandomStreams(1)
        self.scale = np.float32(scale)

        self.inputs = T.fmatrix('inputs')
        self.targets = T.imatrix('targets')
        self.masks = T.bmatrix('masks')
        self.batchsize = self.inputs.shape[0]

        self.inputs_frames = self.inputs.reshape((
            self.batchsize, self.inputs.shape[1]/nin, nin)).dimshuffle(1,0,2)
        self.targets_frames = self.targets.T
        self.masks_frames = self.masks.T

        self.win = theano.shared(value=self.numpy_rng.normal(
            loc=0, scale=0.001, size=(nin, nhid)
        ).astype(theano.config.floatX), name='win')
        self.wrnn = theano.shared(value=self.scale * np.eye(
            nhid, dtype=theano.config.floatX), name='wrnn')
        self.wout = theano.shared(value=self.numpy_rng.uniform(
            low=-0.01, high=0.01, size=(nhid, nout)
        ).astype(theano.config.floatX), name='wout')
        self.bout = theano.shared(value=np.zeros(
            nout, dtype=theano.config.floatX), name='bout')

        self.params = [self.win, self.wrnn, self.wout, self.bout]

        (self.hiddens, self.outputs), self.updates = theano.scan(
            fn=self.step, sequences=self.inputs_frames,
            outputs_info=[self.theano_rng.uniform(low=0, high=1, size=(
                self.batchsize, nhid), dtype=theano.config.floatX), None])

        self.probabilities = T.nnet.softmax(self.outputs.reshape((
            self.outputs.shape[0] * self.outputs.shape[1],
            self.nout)))
        self.probabilities = T.clip(self.probabilities, 1e-6, 1-1e-6)

        self._stepcosts = T.nnet.categorical_crossentropy(
            self.probabilities, self.targets_frames.flatten()).reshape(
                self.targets_frames.shape)

        self._cost = T.switch(T.gt(self.masks_frames, 0), self._stepcosts, 0).mean()
        self._grads = T.grad(self._cost, self.params)

        self.get_classifications = theano.function(
            [self.inputs], T.argmax(self.probabilities.reshape(self.outputs.shape), axis=2).T)
Developer ID: OuYag, Project: Emotion-Recognition-RNN, Lines: 50, Source: rnn.py

Example 14: __init__

    def __init__(self, name, config):
        super().__init__(name)
        self.config = config

        self.param('src_embeddings',
                   (len(config['src_encoder']), config['src_embedding_dims']),
                   init_f=Gaussian(fan_in=config['src_embedding_dims']))
        self.param('trg_embeddings',
                   (len(config['trg_encoder']), config['trg_embedding_dims']),
                   init_f=Gaussian(fan_in=config['trg_embedding_dims']))
        self.add(Linear('hidden',
                        config['decoder_state_dims'],
                        config['trg_embedding_dims']))
        self.add(Linear('emission',
                        config['trg_embedding_dims'],
                        len(config['trg_encoder']),
                        w=self._trg_embeddings.T))
        for prefix, backwards in (('fwd', False), ('back', True)):
            self.add(Sequence(
                prefix+'_encoder', LSTM, backwards,
                config['src_embedding_dims'] + (
                    config['encoder_state_dims'] if backwards else 0),
                config['encoder_state_dims'],
                layernorm=config['encoder_layernorm'],
                dropout=config['encoder_dropout'],
                trainable_initial=True,
                offset=0))
        self.add(Sequence(
            'decoder', LSTM, False,
            config['trg_embedding_dims'],
            config['decoder_state_dims'],
            layernorm=config['decoder_layernorm'],
            dropout=config['decoder_dropout'],
            attention_dims=config['attention_dims'],
            attended_dims=2*config['encoder_state_dims'],
            trainable_initial=False,
            offset=-1))

        h_t = T.matrix('h_t')
        self.predict_fun = function(
                [h_t],
                T.nnet.softmax(self.emission(T.tanh(self.hidden(h_t)))))

        inputs = T.lmatrix('inputs')
        inputs_mask = T.bmatrix('inputs_mask')
        self.encode_fun = function(
                [inputs, inputs_mask],
                self.encode(inputs, inputs_mask))
Developer ID: robertostling, Project: bnas, Lines: 48, Source: nmt.py

Example 15: build_network

def build_network(input_size,hidden_size,constraint_adj=False):
	P = Parameters()
	X = T.bmatrix('X')
	
	P.W_input_hidden = U.initial_weights(input_size,hidden_size)
	P.b_hidden       = U.initial_weights(hidden_size)
	P.b_output       = U.initial_weights(input_size)
	hidden_lin = T.dot(X,P.W_input_hidden)+P.b_hidden
	hidden = T.nnet.sigmoid(hidden_lin)
	output = T.nnet.softmax(T.dot(hidden,P.W_input_hidden.T) + P.b_output)
	parameters = P.values() 
	cost = build_error(X,output,P) 
	if constraint_adj:
		pass
		#cost = cost + adjacency_constraint(hidden_lin)

	return X,output,cost,P
Developer ID: shawntan, Project: viz-speech, Lines: 16, Source: order_constraint.py
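build_network ties the decoder weights to the transpose of the encoder weights (P.W_input_hidden.T), a standard autoencoder trick that halves the number of weight parameters. A minimal sketch of the same tying pattern using plain Theano shared variables (shapes are illustrative):

import numpy
import theano
import theano.tensor as T

W = theano.shared(numpy.random.randn(16, 8).astype(theano.config.floatX), name='W')
x = T.matrix('x')
hidden = T.nnet.sigmoid(T.dot(x, W))          # encoder uses W
output = T.nnet.softmax(T.dot(hidden, W.T))   # decoder reuses W transposed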


Note: The theano.tensor.bmatrix examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce without permission.