

Python tensor.unbroadcast Method Code Examples

This page collects typical usage examples of the theano.tensor.unbroadcast method in Python. If you are wondering what tensor.unbroadcast does, how to call it, or what real usage looks like, the curated code samples below may help. You can also explore other usage examples from the theano.tensor module.


The following presents 15 code examples of tensor.unbroadcast, sorted by popularity by default.
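
Before the examples, a quick note on what the method does may help: tensor.unbroadcast(x, *axes) returns a view of x whose listed dimensions are marked as non-broadcastable, which theano.scan in particular requires for initial states whose type would otherwise be inferred as broadcastable. The snippet below is a minimal sketch and is not taken from any of the projects listed here.

import theano
import theano.tensor as T

# T.zeros((5, 1)) is typed as broadcastable along axis 1 because that axis is
# the constant 1; unbroadcast(..., 1) clears the flag so downstream ops (most
# notably theano.scan) will not treat the axis as broadcastable.
init = T.unbroadcast(T.zeros((5, 1)), 1)
print(init.broadcastable)   # -> (False, False) instead of (False, True)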

Example 1: get_output

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def get_output(self, train=False):
        X = self.get_input(train)  # shape: (nb_samples, time (padded with zeros), input_dim)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        # new shape: (time, nb_samples, input_dim) -> because theano.scan iterates over main dimension
        X = X.dimshuffle((1, 0, 2))
        x = T.dot(X, self.W) + self.b

        # scan = theano symbolic loop.
        # See: http://deeplearning.net/software/theano/library/scan.html
        # Iterate over the first dimension of the x array (=time).
        outputs, updates = theano.scan(
            self._step,  # this will be called with arguments (sequences[i], outputs[i-1], non_sequences[i])
            sequences=[x, dict(input=padded_mask, taps=[-1])],  # tensors to iterate over, inputs to _step
            # initialization of the output. Input to _step with default tap=-1.
            outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            non_sequences=self.U,  # static inputs to _step
            truncate_gradient=self.truncate_gradient)

        if self.return_sequences:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1] 
Developer: lllcho, Project: CAPTCHA-breaking, Lines: 23, Source: recurrent.py
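
For context, the sketch below (not part of the CAPTCHA-breaking project; T.zeros stands in for the project's alloc_zeros_matrix helper) shows why the outputs_info tensor above is wrapped in T.unbroadcast: with a state width of 1, the freshly allocated zeros would be typed as broadcastable along axis 1 while the step output is not, and theano.scan rejects that mismatch.

import numpy as np
import theano
import theano.tensor as T

floatX = theano.config.floatX
x = T.tensor3('x')                                   # (time, batch, 1)
U = theano.shared(np.ones((1, 1), dtype=floatX))

def step(x_t, h_tm1, U):
    return T.tanh(x_t + T.dot(h_tm1, U))

# Without the unbroadcast, scan raises an error about inconsistent
# broadcastable patterns between the initial state and the step output.
init = T.unbroadcast(T.zeros((x.shape[1], 1)), 1)
h, _ = theano.scan(step, sequences=x, outputs_info=init, non_sequences=U)

f = theano.function([x], h[-1])
print(f(np.zeros((4, 2, 1), dtype=floatX)).shape)    # (2, 1)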

Example 2: _train_fprop

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def _train_fprop(self, state_below):
        X = state_below.dimshuffle((1, 0, 2))

        xi = T.dot(X, self.W_i) + self.b_i
        xf = T.dot(X, self.W_f) + self.b_f
        xc = T.dot(X, self.W_c) + self.b_c
        xo = T.dot(X, self.W_o) + self.b_o

        [outputs, memories], updates = theano.scan(
            self._step,
            sequences=[xi, xf, xo, xc],
            outputs_info=[
                T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
                T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
            ],
            non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
            truncate_gradient=self.truncate_gradient)

        if self.return_sequences:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1] 
Developer: hycis, Project: Mozi, Lines: 23, Source: recurrent.py

Example 3: get_forward_output

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def get_forward_output(self, state_below):
        X = state_below.dimshuffle((1,0,2))

        xi = T.dot(X, self.W_i) + self.b_i
        xf = T.dot(X, self.W_f) + self.b_f
        xc = T.dot(X, self.W_c) + self.b_c
        xo = T.dot(X, self.W_o) + self.b_o

        [outputs, memories], updates = theano.scan(
            self._forward_step,
            sequences=[xi, xf, xo, xc],
            outputs_info=[
                T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
                T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
            ],
            non_sequences=[self.U_i, self.U_f, self.U_o, self.U_c],
            truncate_gradient=self.truncate_gradient
        )
        return outputs.dimshuffle((1,0,2)) 
Developer: hycis, Project: Mozi, Lines: 21, Source: recurrent.py

Example 4: get_backward_output

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def get_backward_output(self, state_below):
        X = state_below.dimshuffle((1,0,2))

        xi = T.dot(X, self.Wb_i) + self.bb_i
        xf = T.dot(X, self.Wb_f) + self.bb_f
        xc = T.dot(X, self.Wb_c) + self.bb_c
        xo = T.dot(X, self.Wb_o) + self.bb_o

        [outputs, memories], updates = theano.scan(
            self._forward_step,
            sequences=[xi, xf, xo, xc],
            outputs_info=[
                T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
                T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1)
            ],
            non_sequences=[self.Ub_i, self.Ub_f, self.Ub_o, self.Ub_c],
            go_backwards=True,
            truncate_gradient=self.truncate_gradient
        )
        return outputs.dimshuffle((1,0,2)) 
Developer: hycis, Project: Mozi, Lines: 22, Source: recurrent.py

Example 5: create_empty

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def create_empty(cls, batch_size, num_node_ids, node_state_size, num_edge_types):
        """
        Create an empty graph state with the specified sizes. Note that this
        will contain one zero-strength element to prevent nasty GPU errors
        from a dimension with 0 in it.

            batch_size: Number of batches
            num_node_ids: An integer giving size of node id
            node_state_size: An integer giving size of node state
            num_edge_types: An integer giving number of edge types
        """
        return cls( T.unbroadcast(T.zeros([batch_size, 1]), 1),
                    T.unbroadcast(T.zeros([batch_size, 1, num_node_ids]), 1),
                    T.unbroadcast(T.zeros([batch_size, 1, node_state_size]), 1),
                    T.unbroadcast(T.zeros([batch_size, 1, 1, num_edge_types]), 1, 2)) 
Developer: hexahedria, Project: gated-graph-transformer-network, Lines: 17, Source: graph_state.py

Example 6: zeros_nobroadcast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def zeros_nobroadcast(shape, dtype=theano.config.floatX):
    zeros = T.zeros(shape, dtype=dtype)
    zeros = T.unbroadcast(zeros, *range(len(shape)))
    return zeros 
Developer: stanfordnlp, Project: spinn, Lines: 6, Source: theano_internal.py
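
A short usage sketch of the helper above (assumed usage, not taken from the spinn repository): unlike plain T.zeros, the result never carries broadcastable dimensions, even when some entries of shape equal 1.

import theano
import theano.tensor as T

def zeros_nobroadcast(shape, dtype=theano.config.floatX):
    # helper as defined in the example above
    zeros = T.zeros(shape, dtype=dtype)
    zeros = T.unbroadcast(zeros, *range(len(shape)))
    return zeros

print(T.zeros((1, 5)).broadcastable)            # (True, False)
print(zeros_nobroadcast((1, 5)).broadcastable)  # (False, False)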

Example 7: test_broadcast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def test_broadcast(self):
        # Test that we can rebroadcast
        data = numpy.random.rand(10, 10).astype('float32')
        output_var = f32sc(name="output", value=data)

        up = tensor.unbroadcast(output_var.sum().dimshuffle('x', 'x'), 0, 1)
        output_func = theano.function(inputs=[], outputs=[],
                                      updates=[(output_var, up)])
        output_func()

        up = tensor.patternbroadcast(output_var.sum().dimshuffle('x', 'x'),
                                     output_var.type.broadcastable)
        output_func = theano.function(inputs=[], outputs=[],
                                      updates=[(output_var, up)])
        output_func() 
Developer: muhanzhang, Project: D-VAE, Lines: 17, Source: test_var.py

Example 8: test_rebroadcast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def test_rebroadcast():
    d = numpy.random.rand(10, 10).astype('float32')
    v = theano.tensor.fmatrix()
    up = tensor.unbroadcast(v.sum().dimshuffle('x', 'x'), 0, 1)
    f = theano.function([v], [up], mode=mode_with_gpu)

    f(d)

    topo = f.maker.fgraph.toposort()
    rebrs = [node for node in topo if isinstance(node.op, tensor.Rebroadcast)]
    assert len(rebrs) == 1
    rebr = rebrs[0]

    assert isinstance(rebr.inputs[0].type, GpuArrayType)
    assert isinstance(rebr.outputs[0].type, GpuArrayType) 
Developer: muhanzhang, Project: D-VAE, Lines: 17, Source: test_opt.py

Example 9: test_rebroadcast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def test_rebroadcast(self):
        # I need the sum, because the setup expects the output to be a
        # vector
        self.check_rop_lop(tensor.unbroadcast(
            self.x[:4].dimshuffle('x', 0), 0).sum(axis=1),
            (1,)) 
Developer: muhanzhang, Project: D-VAE, Lines: 8, Source: test_rop.py

Example 10: _e_step

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def _e_step(psamples, W_list, b_list, n_steps=100, eps=1e-5):
    """
    Performs 'n_steps' of mean-field inference (used to compute positive phase
    statistics)

    Parameters
    ----------
    psamples : array-like object of theano shared variables
        State of each layer of the DBM (during the inference process).
        psamples[0] points to the input
    W_list : array-like object of theano shared variables
        Weight matrices connecting consecutive layers of the DBM
    b_list : array-like object of theano shared variables
        Bias vectors of the DBM layers
    n_steps : integer
        Maximum number of mean-field iterations to perform
    eps : float
        Convergence threshold; iteration stops once the largest mean change
        between consecutive states falls below this value
    """
    depth = len(psamples)

    new_psamples = [T.unbroadcast(T.shape_padleft(psample))
                    for psample in psamples]

    # now alternate mean-field inference for even/odd layers
    def mf_iteration(*psamples):
        new_psamples = [p for p in psamples]
        for i in xrange(1, depth, 2):
            new_psamples[i] = hi_given(psamples, i, W_list, b_list)
        for i in xrange(2, depth, 2):
            new_psamples[i] = hi_given(psamples, i, W_list, b_list)

        score = 0.
        for i in xrange(1, depth):
            score = T.maximum(T.mean(abs(new_psamples[i] - psamples[i])),
                              score)

        return new_psamples, theano.scan_module.until(score < eps)

    new_psamples, updates = scan(
        mf_iteration,
        states=new_psamples,
        n_steps=n_steps
    )

    return [x[0] for x in new_psamples] 
Developer: zchengquan, Project: TextDetector, Lines: 42, Source: dbm_metrics.py

Example 11: outputs_info

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def outputs_info(self, batch_size):
        ones_vector = T.ones((batch_size, 1))
        hid_init = T.dot(ones_vector, self.hid_init)
        hid_init = T.unbroadcast(hid_init, 0)
        return [hid_init, hid_init] 
Developer: snipsco, Project: ntm-lasagne, Lines: 7, Source: controllers.py
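
For context, the sketch below (assumed standalone usage, not taken from ntm-lasagne) illustrates the pattern above: dotting a (batch_size, 1) column of ones with a (1, num_units) initial-state parameter tiles it across a batch whose size is only known symbolically, and T.unbroadcast(..., 0) makes it explicit that the batch axis is never to be treated as broadcastable.

import numpy as np
import theano
import theano.tensor as T

floatX = theano.config.floatX
# hid_init stands in for a learned (1, num_units) initial-state parameter.
hid_init = theano.shared(np.zeros((1, 4), dtype=floatX))
batch_size = T.iscalar('batch_size')
ones_vector = T.ones((batch_size, 1))
# The dot result here is already non-broadcastable; unbroadcast(..., 0) is a
# defensive guarantee so scan never sees a broadcastable batch axis.
tiled = T.unbroadcast(T.dot(ones_vector, hid_init), 0)

f = theano.function([batch_size], tiled)
print(f(3).shape)   # (3, 4)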

Example 12: build_sampler

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def build_sampler(self, n_samples, n_steps, T, c):
        states = [TT.zeros(shape=(n_samples,), dtype='int64'),
                TT.zeros(shape=(n_samples,), dtype='float32')]

        init_c = c[0, -self.state['dim']:]
        states += [ReplicateLayer(n_samples)(init(init_c).out).out for init in self.initializers]
        # added by Zhaopeng Tu, 2015-10-30
        # init_coverage
        if self.state['maintain_coverage']:
            # in sampling, init_c is two-dimension (source_length*c_dim), same for init_coverage
            # modified by Zhaopeng Tu, 2015-12-18, big mistake here!!!
            # coverage should be always 3D, the first two dimensions are consistent with alignment probs
            # while the last one is the coverage dim
            if self.state['use_linguistic_coverage'] and self.state['coverage_accumulated_operation'] == 'subtractive':
                init_coverage = TT.unbroadcast(TT.ones((c.shape[0], n_samples, self.state['coverage_dim']), dtype='float32'), 2)
            else:
                init_coverage = TT.unbroadcast(TT.zeros((c.shape[0], n_samples, self.state['coverage_dim']), dtype='float32'), 2)
            states.append(init_coverage)

        if not self.state['search']:
            c = PadLayer(n_steps)(c).out

        # Pad with final states
        non_sequences = [c, T]
        if self.state['maintain_coverage'] and self.state['use_linguistic_coverage'] and self.state['use_fertility_model']:
            fertility = self.state['max_fertility'] * self.fertility_inputer(c).out
            non_sequences.append(fertility)

        outputs, updates = theano.scan(self.sampling_step,
                outputs_info=states,
                non_sequences=non_sequences,
                sequences=[TT.arange(n_steps, dtype="int64")],
                n_steps=n_steps,
                name="{}_sampler_scan".format(self.prefix))
        if self.state['maintain_coverage']:
            if self.state['use_fertility_model'] and self.state['use_linguistic_coverage']:
                return (outputs[0], outputs[1], outputs[-1], fertility), updates
            else:
                return (outputs[0], outputs[1], outputs[-1]), updates
        else:
            return (outputs[0], outputs[1]), updates 
Developer: tuzhaopeng, Project: NMT-Coverage, Lines: 43, Source: encdec.py

Example 13: build_sampler

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def build_sampler(self, n_samples, n_steps, T, c):
        states = [TT.zeros(shape=(n_samples,), dtype='int64'),
                TT.zeros(shape=(n_samples,), dtype='float32')]
        init_c = c[0, -self.state['dim']:]
        states += [ReplicateLayer(n_samples)(init(init_c).out).out for init in self.initializers]
        # added by Zhaopeng Tu, 2015-10-30
        # init_coverage
        if self.state['maintain_coverage']:
            # in sampling, init_c is two-dimension (source_length*c_dim), same for init_coverage
            if self.state['use_accumulated_coverage'] and self.state['coverage_accumulated_operation'] == 'subtractive':
                init_coverage = TT.unbroadcast(TT.ones((c.shape[0], self.state['coverage_dim']), dtype='float32'), 1)
            else:
                init_coverage = TT.unbroadcast(TT.zeros((c.shape[0], self.state['coverage_dim']), dtype='float32'), 1)
            states.append(init_coverage)

        if not self.state['search']:
            c = PadLayer(n_steps)(c).out

        # Pad with final states
        non_sequences = [c, T]

        outputs, updates = theano.scan(self.sampling_step,
                outputs_info=states,
                non_sequences=non_sequences,
                sequences=[TT.arange(n_steps, dtype="int64")],
                n_steps=n_steps,
                name="{}_sampler_scan".format(self.prefix))
        if self.state['maintain_coverage']:
            return (outputs[0], outputs[1], outputs[-1]), updates
        else:
            return (outputs[0], outputs[1]), updates 
Developer: tuzhaopeng, Project: NMT-Coverage, Lines: 33, Source: encdec.py

Example 14: get_predictions

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def get_predictions(self, x, x_tilde, application_call):
        data_sample_preds = self.discriminator.apply(
            tensor.unbroadcast(tensor.concatenate([x, x_tilde], axis=0),
                               *range(x.ndim)))
        data_preds = data_sample_preds[:x.shape[0]]
        sample_preds = data_sample_preds[x.shape[0]:]

        application_call.add_auxiliary_variable(
            tensor.nnet.sigmoid(data_preds).mean(), name='data_accuracy')
        application_call.add_auxiliary_variable(
            (1 - tensor.nnet.sigmoid(sample_preds)).mean(),
            name='sample_accuracy')

        return data_preds, sample_preds 
Developer: fjxmlzn, Project: PacGAN, Lines: 16, Source: bricks.py

Example 15: viterbi_decoding

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import unbroadcast [as alias]
def viterbi_decoding(self, X, mask=None):
        input_energy = self.activation(K.dot(X, self.kernel) + self.bias)
        if self.use_boundary:
            input_energy = self.add_boundary_energy(
                input_energy, mask, self.left_boundary, self.right_boundary)

        argmin_tables = self.recursion(input_energy, mask, return_logZ=False)
        argmin_tables = K.cast(argmin_tables, 'int32')

        # backward to find best path, `initial_best_idx` can be any,
        # as all elements in the last argmin_table are the same
        argmin_tables = K.reverse(argmin_tables, 1)
        # matrix instead of vector is required by tf `K.rnn`
        initial_best_idx = [K.expand_dims(argmin_tables[:, 0, 0])]
        if K.backend() == 'theano':
            from theano import tensor as T
            initial_best_idx = [T.unbroadcast(initial_best_idx[0], 1)]

        def gather_each_row(params, indices):
            n = K.shape(indices)[0]
            if K.backend() == 'theano':
                from theano import tensor as T
                return params[T.arange(n), indices]
            elif K.backend() == 'tensorflow':
                import tensorflow as tf
                indices = K.transpose(K.stack([tf.range(n), indices]))
                return tf.gather_nd(params, indices)
            else:
                raise NotImplementedError

        def find_path(argmin_table, best_idx):
            next_best_idx = gather_each_row(argmin_table, best_idx[0][:, 0])
            next_best_idx = K.expand_dims(next_best_idx)
            if K.backend() == 'theano':
                from theano import tensor as T
                next_best_idx = T.unbroadcast(next_best_idx, 1)
            return next_best_idx, [next_best_idx]

        _, best_paths, _ = K.rnn(find_path, argmin_tables, initial_best_idx,
                                 input_length=K.int_shape(X)[1], unroll=self.unroll)
        best_paths = K.reverse(best_paths, 1)
        best_paths = K.squeeze(best_paths, 2)

        return K.one_hot(best_paths, self.units) 
Developer: keras-team, Project: keras-contrib, Lines: 46, Source: crf.py


Note: The theano.tensor.unbroadcast examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code, and do not reproduce this page without permission.