Python tensor.shape_padaxis Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.shape_padaxis. If you are wondering how tensor.shape_padaxis is used in practice, or what calls to it look like in real code, the curated examples below should help. You can also explore further usage examples from the theano.tensor module.


The following presents 12 code examples of the tensor.shape_padaxis method, sorted by popularity by default.
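Before diving into the examples, here is a minimal sketch of what the method does (our own illustration, assuming a standard Theano install; the variable names are ours): shape_padaxis inserts a broadcastable axis of length 1 at the given position, much like NumPy's np.expand_dims.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                    # symbolic shape (rows, cols)
padded = T.shape_padaxis(x, axis=1)  # symbolic shape (rows, 1, cols)

f = theano.function([x], padded.shape)
print(f(np.zeros((3, 5), dtype=theano.config.floatX)))  # [3 1 5]
print(padded.broadcastable)  # (False, True, False)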

Example 1: process

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def process(self, input_vector):
        """
        Convert an input vector into a categorical distribution across num_categories categories

        Params:
            input_vector: Vector of shape (n_batch, input_width)

        Returns: Categorical distribution of shape (n_batch, 1, num_categories), such that it sums to 1 across
            all categories for each instance in the batch
        """
        transformed = self._transform_stack.process(input_vector)
        return T.shape_padaxis(transformed, 1)
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 14, Source: output_category.py
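As a quick sanity check of the shape contract in the docstring (a toy sketch of ours with made-up sizes, not code from the project; the same check applies to Example 2 below):

import numpy as np
import theano
import theano.tensor as T

transformed = T.matrix('transformed')     # (n_batch, num_categories)
result = T.shape_padaxis(transformed, 1)  # (n_batch, 1, num_categories)

f = theano.function([transformed], result)
print(f(np.ones((4, 7), dtype=theano.config.floatX)).shape)  # (4, 1, 7)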

Example 2: process

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def process(self, input_vector):
        """
        Convert an input vector into a probabilistic set, i.e. a list of probabilities of item i being in
        the output set.

        Params:
            input_vector: Vector of shape (n_batch, input_width)

        Returns: Set distribution of shape (n_batch, 1, num_categories), where each value is independent from
            the others.
        """
        transformed = self._transform_stack.process(input_vector)
        return T.shape_padaxis(transformed, 1)
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 15, Source: output_set.py

Example 3: process

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def process(self, gstate, input_vector, dropout_masks=Ellipsis):
        """
        Process an input vector and update the state accordingly. Each node runs a GRU step
        with previous state from the node state and input from the vector.

        Params:
            gstate: A GraphState giving the current state
            input_vector: A tensor of the form (n_batch, input_width)
        """
        if dropout_masks is Ellipsis:
            dropout_masks = None
            append_masks = False
        else:
            append_masks = True

        # gstate.edge_states is of shape (n_batch, n_nodes, n_nodes, id+state)
        # combined input should be broadcasted to (n_batch, n_nodes, n_nodes, X)
        input_vector_part = T.shape_padaxis(T.shape_padaxis(input_vector, 1), 2)
        source_state_part = T.shape_padaxis(T.concatenate([gstate.node_ids, gstate.node_states], 2), 2)
        dest_state_part = T.shape_padaxis(T.concatenate([gstate.node_ids, gstate.node_states], 2), 1)
        full_input = broadcast_concat([input_vector_part, source_state_part, dest_state_part], 3)

        # we flatten to process updates
        flat_input = full_input.reshape([-1, self._process_input_size])
        flat_result, dropout_masks = self._update_stack.process(flat_input, dropout_masks)
        result = flat_result.reshape([gstate.n_batch, gstate.n_nodes, gstate.n_nodes, self._graph_spec.num_edge_types, 2])
        should_set = result[:,:,:,:,0]
        should_clear = result[:,:,:,:,1]

        new_strengths = gstate.edge_strengths*(1-should_clear) + (1-gstate.edge_strengths)*should_set

        new_gstate = gstate.with_updates(edge_strengths=new_strengths)
        if append_masks:
            return new_gstate, dropout_masks
        else:
            return new_gstate 
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 38, Source: edge_state_update.py
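The shape_padaxis calls above implement a pairwise broadcasting trick. A toy sketch of ours (shapes invented for illustration): padding the source states at axis 2 and the destination states at axis 1 lets one elementwise op produce the full (n_batch, n_nodes, n_nodes, X) grid.

import numpy as np
import theano
import theano.tensor as T

states = T.tensor3('states')      # (n_batch, n_nodes, X)
src = T.shape_padaxis(states, 2)  # (n_batch, n_nodes, 1, X)
dst = T.shape_padaxis(states, 1)  # (n_batch, 1, n_nodes, X)
pairwise = src + dst              # broadcasts to (n_batch, n_nodes, n_nodes, X)

f = theano.function([states], pairwise.shape)
print(f(np.zeros((2, 5, 3), dtype=theano.config.floatX)))  # [2 5 5 3]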

Example 4: get_output_for

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def get_output_for(self, inputs, deterministic=False, **kwargs):
        return T.shape_padaxis(inputs, axis=self.n_ax).repeat(self.n_rep, self.n_ax) 
Author: kuleshov, Project: deep-learning-models, Lines: 4, Source: shape.py

Example 5: get_output_for

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def get_output_for(self, input, **kwargs):
        output = T.shape_padaxis(input[:, 0], axis=1) * input[:, 1:]
        return output
Author: SBU-BMI, Project: u24_lymphocyte, Lines: 5, Source: ch_inner_prod.py

Example 6: calc_binaryVal_negative_log_likelihood

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def calc_binaryVal_negative_log_likelihood(data, probabilities, axis_to_sum=1):
    if axis_to_sum != 1:
        # addresses the case where we marginalize
        data = T.extra_ops.repeat(T.shape_padaxis(data, axis=1), repeats=probabilities.shape[1], axis=1)
    return -T.sum(data * T.log(probabilities) + (1 - data) * T.log(1 - probabilities), axis=axis_to_sum)
Author: enalisnick, Project: stick-breaking_dgms, Lines: 7, Source: loss_fns.py
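The marginalization branch above (repeated in Examples 7 through 9) pads the data with a new component axis and repeats it to match the probabilities. A toy sketch of ours with invented shapes:

import numpy as np
import theano
import theano.tensor as T

data = T.matrix('data')     # (n_batch, dim)
probs = T.tensor3('probs')  # (n_batch, n_components, dim)
data_rep = T.extra_ops.repeat(T.shape_padaxis(data, axis=1),
                              repeats=probs.shape[1], axis=1)

f = theano.function([data, probs], data_rep.shape)
print(f(np.zeros((2, 4), dtype=theano.config.floatX),
        np.zeros((2, 3, 4), dtype=theano.config.floatX)))  # [2 3 4]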

Example 7: calc_categoricalVal_negative_log_likelihood

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def calc_categoricalVal_negative_log_likelihood(data, probabilities, axis_to_sum=1):
    if axis_to_sum != 1:
        # addresses the case where we marginalize
        data = T.extra_ops.repeat(T.shape_padaxis(data, axis=1), repeats=probabilities.shape[1], axis=1)
    return -T.sum(data * T.log(probabilities), axis=axis_to_sum)
Author: enalisnick, Project: stick-breaking_dgms, Lines: 7, Source: loss_fns.py

Example 8: calc_realVal_negative_log_likelihood

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def calc_realVal_negative_log_likelihood(data, recon, axis_to_sum=1):
    if axis_to_sum != 1:
        # addresses the case where we marginalize
        data = T.extra_ops.repeat(T.shape_padaxis(data, axis=1), repeats=recon.shape[1], axis=1)
    return .5 * T.sum((data - recon)**2, axis=axis_to_sum)
Author: enalisnick, Project: stick-breaking_dgms, Lines: 7, Source: loss_fns.py

Example 9: calc_poissonVal_negative_log_likelihood

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def calc_poissonVal_negative_log_likelihood(data, recon, axis_to_sum=1):
    if axis_to_sum != 1:
        # addresses the case where we marginalize
        data = T.extra_ops.repeat(T.shape_padaxis(data, axis=1), repeats=recon.shape[1], axis=1)
    return T.sum(T.exp(recon) - data * recon, axis=axis_to_sum)
Author: enalisnick, Project: stick-breaking_dgms, Lines: 7, Source: loss_fns.py

Example 10: __init__

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def __init__(self, rng, input, batch_size, latent_size, label_size, out_size, activation, W_z, W_y, b):

        # init parent class
        super(Marginalized_Decoder, self).__init__(rng=rng, input=input, latent_size=latent_size, out_size=out_size, activation=activation, W_z=W_z, b=b)

        # setup the params
        self.W_y = W_y

        # compute marginalized outputs
        labels_tensor = T.extra_ops.repeat(T.shape_padaxis(T.eye(n=label_size, m=label_size), axis=0), repeats=batch_size, axis=0)
        self.output = self.activation(T.extra_ops.repeat(T.shape_padaxis(T.dot(self.input, self.W_z), axis=1), repeats=label_size, axis=1) + T.dot(labels_tensor, self.W_y) + self.b)

        # no params here since we'll grab them from the supervised decoder
Author: enalisnick, Project: stick-breaking_dgms, Lines: 15, Source: decoders.py
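The labels_tensor built above stacks one identity matrix per batch element, enumerating every possible label for the marginalization. A small sketch of ours with toy sizes:

import theano.tensor as T

label_size, batch_size = 3, 2
labels = T.extra_ops.repeat(T.shape_padaxis(T.eye(n=label_size, m=label_size), axis=0),
                            repeats=batch_size, axis=0)
print(labels.eval().shape)  # (2, 3, 3): each batch row is the 3x3 identity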

Example 11: process

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def process(self, gstate, dropout_masks=Ellipsis):
        """
        Process a graph state.
          1. Data is transferred from each node to each other node along both forward and backward edges.
                This data is processed with a Wx+b style update, and an optional transformation is applied
          2. Nodes sum the transferred data, weighted by the existence of the other node and the edge.
          3. Nodes perform a GRU update with this input

        Params:
            gstate: A GraphState giving the current state
        """
        if dropout_masks is Ellipsis:
            dropout_masks = None
            append_masks = False
        else:
            append_masks = True

        node_obs = T.concatenate([gstate.node_ids, gstate.node_states],2)
        flat_node_obs = node_obs.reshape([-1, self._process_input_size])
        transformed, dropout_masks = self._transfer_stack.process(flat_node_obs,dropout_masks)
        transformed = transformed.reshape([gstate.n_batch, gstate.n_nodes, 2*self._graph_spec.num_edge_types, self._transfer_size])
        scaled_transformed = transformed * T.shape_padright(T.shape_padright(gstate.node_strengths))
        # scaled_transformed is of shape (n_batch, n_nodes, 2*num_edge_types, transfer_size)
        # We want to multiply  through by edge strengths, which are of shape
        # (n_batch, n_nodes, n_nodes, num_edge_types), both fwd and backward
        edge_strength_scale = T.concatenate([gstate.edge_strengths, gstate.edge_strengths.swapaxes(1,2)], 3)
        # edge_strength_scale is of (n_batch, n_nodes, n_nodes, 2*num_edge_types)
        intermed = T.shape_padaxis(scaled_transformed, 2) * T.shape_padright(edge_strength_scale)
        # intermed is of shape (n_batch, n_nodes "source", n_nodes "dest", 2*num_edge_types, transfer_size)
        # now reduce along the "source" and "edge_types" dimensions to get dest activations
        # of shape (n_batch, n_nodes, transfer_size)
        reduced_result = T.sum(T.sum(intermed, 3), 1)

        # now add information from the current node id
        full_input = T.concatenate([gstate.node_ids, reduced_result], 2)

        # we flatten to apply GRU
        flat_input = full_input.reshape([-1, self._graph_spec.num_node_ids + self._transfer_size])
        flat_state = gstate.node_states.reshape([-1, self._graph_spec.node_state_size])
        new_flat_state, dropout_masks = self._propagation_gru.step(flat_input, flat_state, dropout_masks)

        new_node_states = new_flat_state.reshape(gstate.node_states.shape)

        new_gstate = gstate.with_updates(node_states=new_node_states)
        if append_masks:
            return new_gstate, dropout_masks
        else:
            return new_gstate 
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 50, Source: propagation.py
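The key line above pairs shape_padaxis with shape_padright: one operand gains a broadcastable destination-node axis, the other a broadcastable transfer axis, and their product expands to the full 5-D grid. A toy sketch of ours (shapes invented):

import numpy as np
import theano
import theano.tensor as T

transformed = T.tensor4('transformed')  # (n_batch, n_nodes, n_edge_dirs, transfer_size)
strengths = T.tensor4('strengths')      # (n_batch, n_nodes, n_nodes, n_edge_dirs)
intermed = T.shape_padaxis(transformed, 2) * T.shape_padright(strengths)

f = theano.function([transformed, strengths], intermed.shape)
print(f(np.zeros((2, 4, 6, 3), dtype=theano.config.floatX),
        np.zeros((2, 4, 4, 6), dtype=theano.config.floatX)))  # [2 4 4 6 3]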

Example 12: get_candidates

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import shape_padaxis [as alias]
def get_candidates(self, gstate, input_vector, max_candidates, dropout_masks=None):
        """
        Get the current candidate new nodes. This is accomplished as follows:
          1. The proposer network, conditioned on the input vector, proposes multiple candidate nodes,
                along with a confidence
          2. Every existing node, conditioned on its own state and the candidate, votes on whether or not
                to accept this node
          3. A new node is created for each candidate node, with an existence strength given by
                confidence * [product of all votes], and an initial state as proposed
        This method directly returns these new nodes for comparison

        Params:
            gstate: A GraphState giving the current state
            input_vector: A tensor of the form (n_batch, input_width)
            max_candidates: Integer, limit on the number of candidates to produce

        Returns:
            new_strengths: A tensor of the form (n_batch, new_node_idx)
            new_ids: A tensor of the form (n_batch, new_node_idx, num_node_ids)
        """
        n_batch = gstate.n_batch
        n_nodes = gstate.n_nodes
        outputs_info = [self._proposer_gru.initial_state(n_batch)]
        proposer_step = lambda st,ipt,*dm: self._proposer_gru.step(ipt,st,dm if dropout_masks is not None else None)
        raw_proposal_acts, _ = theano.scan(proposer_step, n_steps=max_candidates, non_sequences=[input_vector]+(dropout_masks if dropout_masks is not None else []), outputs_info=outputs_info)

        # raw_proposal_acts is of shape (candidate, n_batch, blah)
        flat_raw_acts = raw_proposal_acts.reshape([-1, self._proposal_width])
        flat_processed_acts = self._proposer_stack.process(flat_raw_acts)
        candidate_strengths = T.nnet.sigmoid(flat_processed_acts[:,0]).reshape([max_candidates, n_batch])
        candidate_ids = T.nnet.softmax(flat_processed_acts[:,1:]).reshape([max_candidates, n_batch, self._graph_spec.num_node_ids])

        # Votes will be of shape (candidate, n_batch, n_nodes)
        # To generate this we want to assemble (candidate, n_batch, n_nodes, input_stuff),
        # squash to (parallel, input_stuff), do voting op, then unsquash
        candidate_id_part = T.shape_padaxis(candidate_ids, 2)
        node_id_part = T.shape_padaxis(gstate.node_ids, 0)
        node_state_part = T.shape_padaxis(gstate.node_states, 0)
        full_vote_input = broadcast_concat([node_id_part, node_state_part, candidate_id_part], 3)
        flat_vote_input = full_vote_input.reshape([-1, full_vote_input.shape[-1]])
        vote_result = self._vote_stack.process(flat_vote_input)
        final_votes_no = vote_result.reshape([max_candidates, n_batch, n_nodes])
        weighted_votes_yes = 1 - final_votes_no * T.shape_padleft(gstate.node_strengths)
        # Add in the strength vote
        all_votes = T.concatenate([T.shape_padright(candidate_strengths), weighted_votes_yes], 2)
        # Take the product -> (candidate, n_batch)
        chosen_strengths = T.prod(all_votes, 2)

        new_strengths = chosen_strengths.dimshuffle([1,0])
        new_ids = candidate_ids.dimshuffle([1,0,2])
        return new_strengths, new_ids 
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 53, Source: new_nodes_vote.py
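This example leans on all three of Theano's padding helpers (shape_padleft, shape_padaxis, shape_padright). For reference, a sketch of ours showing the three on the same toy tensor:

import numpy as np
import theano
import theano.tensor as T

v = T.matrix('v')            # (a, b)
left = T.shape_padleft(v)    # (1, a, b)
mid = T.shape_padaxis(v, 1)  # (a, 1, b)
right = T.shape_padright(v)  # (a, b, 1)

f = theano.function([v], [left.shape, mid.shape, right.shape])
print(f(np.zeros((2, 3), dtype=theano.config.floatX)))
# [array([1, 2, 3]), array([2, 1, 3]), array([2, 3, 1])]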


Note: The theano.tensor.shape_padaxis method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; please consult each project's license before distributing or using the code. Do not reproduce without permission.