

Python torch.fmod Method Code Examples

This article collects typical usage examples of the torch.fmod method in Python. If you are wondering how exactly torch.fmod works, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples from the torch package, to which this method belongs.


The following presents 15 code examples of the torch.fmod method, sorted by popularity by default.
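
Before the examples, a minimal sketch of what torch.fmod computes: an element-wise remainder that keeps the sign of the dividend (C fmod semantics), in contrast to torch.remainder, which follows the sign of the divisor. The expected outputs in the comments reflect standard PyTorch behavior:

import torch

a = torch.tensor([-3.0, -1.5, 1.5, 3.0])
print(torch.fmod(a, 2.0))       # expected: tensor([-1.0000, -1.5000,  1.5000,  1.0000])
print(torch.remainder(a, 2.0))  # expected: tensor([1.0000, 0.5000, 1.5000, 1.0000])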

Example 1: forward

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def forward(self, x):
		x = self.feature_model(x)
		y0 = self.fc(x)
		Pc = F.softmax(y0, dim=1)
		y1 = torch.stack([self.bin_models[i](x) for i in range(self.num_classes)]).permute(1, 2, 0)
		Pl = F.softmax(y1, dim=1)
		Plc = Pl * torch.unsqueeze(Pc, dim=1)
		ind = torch.argmax(Plc.view(x.size(0), -1), dim=1, keepdim=True)
		ip = ind/self.num_classes
		ic = torch.fmod(ind, self.num_classes)
		label = torch.zeros(ic.size(0), self.num_classes).scatter_(1, ic.data.cpu(), 1.0)
		label = Variable(label.unsqueeze(2).cuda())
		y1 = torch.squeeze(torch.bmm(y1, label), 2)
		if not args.multires:
			y2 = torch.stack([self.res_models[i](x) for i in range(self.num_classes)]).permute(1, 2, 0)
			y2 = torch.squeeze(torch.bmm(y2, label), 2)
		else:
			y2 = torch.stack([self.res_models[i](x) for i in range(self.num_classes * self.num_clusters)])
			y2 = y2.view(self.num_classes, self.num_clusters, -1, self.ndim).permute(1, 2, 3, 0)
			y2 = torch.squeeze(torch.matmul(y2, label), 3)
			pose_label = torch.zeros(ip.size(0), self.num_clusters).scatter_(1, ip.data.cpu(), 1.0)
			pose_label = Variable(pose_label.unsqueeze(2).cuda())
			y2 = torch.squeeze(torch.bmm(y2.permute(1, 2, 0), pose_label), 2)
		return [y0, y1, y2, Plc]   # cat, pose_bin, pose_delta 
Author: JHUVisionLab, Project: multi-modal-regression, Lines: 26, Source: learnJointCatPoseModel_top1_new.py

Example 2: decompose

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def decompose(tensor, field):
    """decompose a tensor into its binary representation."""
    torch_dtype = get_torch_dtype(field)
    n_bits = get_n_bits(field)
    powers = torch.arange(n_bits, dtype=torch_dtype)
    if hasattr(tensor, "child") and isinstance(tensor.child, dict):
        powers = powers.send(*list(tensor.child.keys()), **no_wrap)
    for _ in range(len(tensor.shape)):
        powers = powers.unsqueeze(0)
    tensor = tensor.unsqueeze(-1)
    moduli = 2 ** powers
    tensor = torch.fmod((tensor / moduli.type_as(tensor)), 2)
    return tensor 
Author: OpenMined, Project: PySyft, Lines: 15, Source: securenn.py
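
For intuition, here is a standalone sketch of the same bit-decomposition idea in plain PyTorch, without the PySyft-specific helpers (get_torch_dtype, .child, .send); the shapes and values are illustrative:

import torch

x = torch.tensor([5, 6], dtype=torch.int64)        # 5 = 0b101, 6 = 0b110
powers = 2 ** torch.arange(4, dtype=torch.int64)   # moduli [1, 2, 4, 8]
bits = torch.fmod(x.unsqueeze(-1) // powers, 2)    # divide away lower bits, fmod by 2 keeps one bit
print(bits)  # expected: [[1, 0, 1, 0], [0, 1, 1, 0]], least-significant bit first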

Example 3: _test_attention

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def _test_attention(self, attention):
        dummy_source_hids = torch.rand(self.src_len, self.bsz, self.ctx_dim)
        dummy_decoder_state = torch.rand(self.bsz, self.dec_dim)
        dummy_src_lengths = torch.fmod(torch.arange(self.bsz), self.src_len) + 1
        attention(dummy_decoder_state, dummy_source_hids, dummy_src_lengths) 
Author: pytorch, Project: translate, Lines: 7, Source: test_attention.py
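
Here torch.fmod(torch.arange(self.bsz), self.src_len) + 1 simply cycles through 1, 2, ..., src_len, 1, 2, ..., so every dummy source length is a valid value between 1 and src_len.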

Example 4: fmod

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def fmod(t1, t2):
    """
    Element-wise division remainder of values of operand t1 by values of operand t2 (i.e. C Library function fmod), not commutative.
    Takes the two operands (scalar or tensor, both may contain floating point number) whose elements are to be
    divided (operand 1 by operand 2) as arguments.

    Parameters
    ----------
    t1: tensor or scalar
        The first operand whose values are divided (may be floats)
    t2: tensor or scalar
        The second operand by whose values is divided (may be floats)

    Returns
    -------
    result: ht.DNDarray
        A tensor containing the remainder of the element-wise division (i.e. floating point values) of t1 by t2.
        It has the same sign as the dividend t1.

    Examples:
    ---------
    >>> import heat as ht
    >>> ht.fmod(2.0, 2.0)
    tensor([0.])

    >>> T1 = ht.float32([[1, 2], [3, 4]])
    >>> T2 = ht.float32([[2, 2], [2, 2]])
    >>> ht.fmod(T1, T2)
    tensor([[1., 0.],
            [1., 0.]])

    >>> s = 2.0
    >>> ht.fmod(s, T1)
    tensor([[0., 0.],
            [2., 2.]])
    """
    return operations.__binary_op(torch.fmod, t1, t2) 
Author: helmholtz-analytics, Project: heat, Lines: 39, Source: arithmetics.py

Example 5: index_select_state

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def index_select_state(self, state, best_ids):
        """Select CTC states according to best ids

        :param state    : CTC state
        :param best_ids : index numbers selected by beam pruning (B, W)
        :return selected_state
        """
        r, s, f_min, f_max, scoring_idmap = state
        # convert ids to BWO space
        vidx = (best_ids + self.pad_bo).view(-1)
        # select hypothesis scores
        s_new = torch.index_select(s.view(-1), 0, vidx)
        s_new = s_new.view(-1, 1).repeat(1, self.odim).view(self.n_bb, self.odim)
        # convert ids to BWS space (S: scoring_num)
        if scoring_idmap is not None:
            snum = self.scoring_num
            beam_idx = (torch.div(best_ids, self.odim) + self.pad_b).view(-1)
            label_ids = torch.fmod(best_ids, self.odim).view(-1)
            score_idx = scoring_idmap[beam_idx, label_ids]
            score_idx[score_idx == -1] = 0
            vidx = score_idx + beam_idx * snum
        else:
            snum = self.odim
        # select forward probabilities
        r_new = torch.index_select(r.view(-1, 2, self.n_bb * snum), 2, vidx).view(
            -1, 2, self.n_bb
        )
        return r_new, s_new, f_min, f_max 
Author: espnet, Project: espnet, Lines: 30, Source: ctc_prefix_score.py
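
The torch.div/torch.fmod pair above is the usual trick for turning a flat top-k index back into (beam, label) coordinates. A minimal sketch under assumed shapes (PyTorch >= 1.8 for rounding_mode; names are illustrative):

import torch

beam, vocab = 3, 5
scores = torch.rand(beam, vocab)
values, best_ids = torch.topk(scores.view(-1), k=4)            # flat indices in [0, beam*vocab)
beam_idx = torch.div(best_ids, vocab, rounding_mode='floor')   # row (beam) index
label_idx = torch.fmod(best_ids, vocab)                        # column (label) index
assert torch.equal(scores[beam_idx, label_idx], values)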

Example 6: myProj

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def myProj(x):
	angle = torch.norm(x, 2, 1, True)
	axis = F.normalize(x)
	angle = torch.fmod(angle, 2*np.pi)
	return angle*axis


# my model for pose estimation: feature model + 1layer pose model x 12 
Author: JHUVisionLab, Project: multi-modal-regression, Lines: 10, Source: evaluateGeodesicRegressionModel.py
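
Since torch.norm is non-negative, the fmod call above always maps the rotation angle into [0, 2*pi). A quick illustration with made-up angles:

import torch
import numpy as np

angle = torch.tensor([[7.0], [13.0]])
print(torch.fmod(angle, 2 * np.pi))  # expected: roughly [[0.7168], [0.4336]]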

Example 7: testing

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def testing():
	model.eval()
	ytrue_cat, ytrue_pose = [], []
	ypred_cat, ypred_pose = [], []
	for i, sample in enumerate(test_loader):
		xdata = Variable(sample['xdata'].cuda())
		output = model(xdata)
		output_cat = output[0]
		output_bin = output[1]
		output_res = output[2]
		joint_probs = output[3]
		ind = torch.argmax(joint_probs.view(xdata.size(0), -1), dim=1)
		ip = ind/num_classes
		ic = torch.fmod(ind, num_classes)
		tmp_labels = ic.data.cpu().numpy()
		ypred_cat.append(tmp_labels)
		label = Variable(sample['label'])
		ytrue_cat.append(sample['label'].squeeze().numpy())
		ypred_bin = ip.data.cpu().numpy()
		ypred_res = output_res.data.cpu().numpy()
		ypred_pose.append(kmeans_dict[ypred_bin, :] + ypred_res)
		ytrue_pose.append(sample['ydata'].numpy())
		del xdata, label, output, sample, output_cat, output_bin, output_res, joint_probs, ind, ip, ic
		gc.collect()
	ytrue_cat = np.concatenate(ytrue_cat)
	ypred_cat = np.concatenate(ypred_cat)
	ytrue_pose = np.concatenate(ytrue_pose)
	ypred_pose = np.concatenate(ypred_pose)
	model.train()
	return ytrue_cat, ytrue_pose, ypred_cat, ypred_pose 
Author: JHUVisionLab, Project: multi-modal-regression, Lines: 32, Source: learnJointCatPoseModel_top1_new.py

Example 8: myProj

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def myProj(x):
	angle = torch.norm(x, 2, 1, True)
	axis = F.normalize(x)
	angle = torch.fmod(angle, np.pi)
	return angle*axis


# my_model 
Author: JHUVisionLab, Project: multi-modal-regression, Lines: 10, Source: learnGeodesicRegressionModel.py

Example 9: index_select_state

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def index_select_state(self, state, best_ids):
        """Select CTC states according to best ids

        :param state    : CTC state
        :param best_ids : index numbers selected by beam pruning (B, W)
        :return selected_state
        """
        r, s, f_min, f_max, scoring_idmap = state
        # convert ids to BWO space
        vidx = (best_ids + self.pad_bo).view(-1)
        # select hypothesis scores
        s_new = torch.index_select(s.view(-1), 0, vidx)
        s_new = s_new.view(-1, 1).repeat(1, self.odim).view(self.n_bb, self.odim)
        # convert ids to BWS space (S: scoring_num)
        if scoring_idmap is not None:
            snum = self.scoring_num
            beam_idx = (torch.div(best_ids, self.odim) + self.pad_b).view(-1)
            label_ids = torch.fmod(best_ids, self.odim).view(-1)
            score_idx = scoring_idmap[beam_idx, label_ids]
            score_idx[score_idx == -1] = 0
            vidx = score_idx + beam_idx * snum
        else:
            snum = self.odim
        # select forward probabilities
        r_new = torch.index_select(r.view(-1, 2, self.n_bb * snum), 2, vidx).view(-1, 2, self.n_bb)
        return r_new, s_new, f_min, f_max 
Author: DigitalPhonetics, Project: adviser, Lines: 28, Source: ctc_prefix_score.py

Example 10: __init__

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def __init__(self, z_num, z_dim):
        super(Embedding, self).__init__()

        self.z_list = nn.ParameterList()
        self.z_num = z_num
        self.z_dim = z_dim

        h,k = self.z_num

        for i in range(h):
            for j in range(k):
                self.z_list.append(Parameter(torch.fmod(torch.randn(self.z_dim).cuda(), 2))) 
Author: g1910, Project: HyperNetworks, Lines: 14, Source: primary_net.py

Example 11: __init__

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def __init__(self, f_size = 3, z_dim = 64, out_size=16, in_size=16):
        super(HyperNetwork, self).__init__()
        self.z_dim = z_dim
        self.f_size = f_size
        self.out_size = out_size
        self.in_size = in_size

        self.w1 = Parameter(torch.fmod(torch.randn((self.z_dim, self.out_size*self.f_size*self.f_size)).cuda(),2))
        self.b1 = Parameter(torch.fmod(torch.randn((self.out_size*self.f_size*self.f_size)).cuda(),2))

        self.w2 = Parameter(torch.fmod(torch.randn((self.z_dim, self.in_size*self.z_dim)).cuda(),2))
        self.b2 = Parameter(torch.fmod(torch.randn((self.in_size*self.z_dim)).cuda(),2)) 
Author: g1910, Project: HyperNetworks, Lines: 14, Source: hypernetwork_modules.py
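
In the two HyperNetworks examples above, fmod acts as a cheap tail truncation: torch.fmod(torch.randn(...), 2) folds every sample back into the open interval (-2, 2). A minimal CPU-only sketch of that effect (no .cuda(); the size is illustrative):

import torch

w = torch.fmod(torch.randn(10000), 2)  # magnitudes stay strictly below the divisor
print(w.abs().max() < 2)               # expected: tensor(True)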

Example 12: update_targets

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def update_targets(targets, best_indices, idx, vocab_size):
    best_tensor_indices = torch.div(best_indices, vocab_size)
    best_token_indices = torch.fmod(best_indices, vocab_size)
    new_batch = torch.index_select(targets, 0, best_tensor_indices)
    new_batch[:, idx] = best_token_indices
    return new_batch 
Author: tunz, Project: transformer-pytorch, Lines: 8, Source: decoder.py
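
This is the same flat-index split as in examples 5 and 9: with vocab_size = 5, for instance, a flat best index of 13 maps to tensor 13 // 5 = 2 and token 13 % 5 = 3.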

Example 13: spiral_sampling

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def spiral_sampling(grid, percentage):
    b, c, h, w = grid.size()    
    N = torch.tensor(h*w*percentage).int().float()    
    sampling = torch.zeros_like(grid)[:, 0, :, :].unsqueeze(1)
    phi_k = torch.tensor(0.0).float()
    for k in torch.arange(N - 1):
        k = k.float() + 1.0
        h_k = -1 + 2 * (k - 1) / (N - 1)
        theta_k = torch.acos(h_k)
        phi_k = phi_k + torch.tensor(3.6).float() / torch.sqrt(N) / torch.sqrt(1 - h_k * h_k) \
            if k > 1.0 else torch.tensor(0.0).float()
        phi_k = torch.fmod(phi_k, 2 * numpy.pi)
        sampling[:, :, int(theta_k / numpy.pi * h) - 1, int(phi_k / numpy.pi / 2 * w) - 1] += 1.0
    return (sampling > 0).float() 
Author: VCL3D, Project: SphericalViewSynthesis, Lines: 16, Source: test.py

Example 14: step

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def step(self, closure=None):
		loss = None
		if closure is not None:
			loss = closure()

		for group in self.param_groups:
			weight_decay = group['weight_decay']
			momentum = group['momentum']
			dampening = group['dampening']
			nesterov = group['nesterov']

			for p in group['params']:
				if p.grad is None:
					continue
				d_p = p.grad.data

				state = self.state[p]

				# State initialization
				if len(state) == 0:
					state['step'] = 0
				state['step'] += 1

				if weight_decay != 0:
					d_p.add_(weight_decay, p.data)
				if momentum != 0:
					param_state = self.state[p]
					if 'momentum_buffer' not in param_state:
						buf = param_state['momentum_buffer'] = torch.zeros_like(p.data)
						buf.mul_(momentum).add_(d_p)
					else:
						buf = param_state['momentum_buffer']
						buf.mul_(momentum).add_(1 - dampening, d_p)
					if nesterov:
						d_p = d_p.add(momentum, buf)
					else:
						d_p = buf

				# cyclical learning rate
				t = (np.fmod(state['step']-1, self.c)+1)/self.c
				if t <= 0.5:
					step_size = (1-2*t)*group['alpha1'] + 2*t*group['alpha2']
				else:
					step_size = 2*(1-t)*group['alpha2'] + (2*t-1)*group['alpha1']
				writer.add_scalar('lr', step_size, state['step'])
				p.data.add_(-step_size, d_p)

		return loss 
Author: JHUVisionLab, Project: multi-modal-regression, Lines: 50, Source: evaluateGeodesicRegressionModel.py
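
The np.fmod call above drives a triangular cyclical learning-rate schedule of period c between group['alpha1'] and group['alpha2']. A standalone sketch of just that schedule, with illustrative values for c, alpha1 and alpha2:

import numpy as np

def cyclical_lr(step, c=10, alpha1=0.1, alpha2=0.01):
    t = (np.fmod(step - 1, c) + 1) / c                   # position inside the current cycle, t in (0, 1]
    if t <= 0.5:
        return (1 - 2 * t) * alpha1 + 2 * t * alpha2     # first half: alpha1 -> alpha2
    return 2 * (1 - t) * alpha2 + (2 * t - 1) * alpha1   # second half: alpha2 -> alpha1

print([round(cyclical_lr(s), 3) for s in range(1, 11)])  # expected to ramp 0.082 ... 0.01 ... 0.1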

Example 15: forward

# Required imports: import torch [as alias]
# Or: from torch import fmod [as alias]
def forward(self, indices, num_classes):
        if not self._random_off_targets:
            # construct one hot
            batch_size = indices.size(0)
            self._one_hot.resize_(batch_size, num_classes).fill_(0.0)
            self._ones.resize_(batch_size, num_classes).fill_(1.0)
            self._one_hot.scatter_(1, indices.view(-1,1), self._ones)
            one_hot_labels = self._one_hot
            # label smoothing
            smooth_positives = 1.0 - self._label_smoothing
            smooth_negatives = self._label_smoothing / num_classes
            return one_hot_labels * smooth_positives + smooth_negatives
        else:
            # construct one hot
            batch_size = indices.size(0)
            self._one_hot.resize_(batch_size, num_classes).fill_(0.0)
            self._ones.resize_(batch_size, num_classes).fill_(1.0)
            self._one_hot.scatter_(1, indices.view(-1,1), self._ones)
            positive_labels = self._one_hot
            smooth_positives = 1.0 - self._label_smoothing
            smooth_negatives = self._label_smoothing
            positive_labels = positive_labels * smooth_positives

            negative_labels = 1.0 - self._one_hot
            self._noise.resize_(batch_size, num_classes).uniform_(1e-1, 1.0)
            self._noise = self._noise * negative_labels
            self._noise = smooth_negatives * self._noise / self._noise.sum(dim=1, keepdim=True)
            one_hot_labels = positive_labels + self._noise
            return one_hot_labels

            # label smoothing
            # smooth_positives = 1.0 - self._label_smoothing
            # sum_negatives = self._label_smoothing

            # self._noise.resize_(batch_size, num_classes).random_(1e-5, 1-3)
            # self._noise = self._noise / self._noise.sum()
            # torch.random()

            # batch_size = indices.size(0)
            # self._one_hot.resize_(batch_size, num_classes).fill_(0.0)
            # self._ones.resize_(batch_size, num_classes).fill_(1.0)
            # self._one_hot.scatter_(1, indices.view(-1,1), self._ones)

            # torch.rand(1e-5, smooth_negatives)
            # offsets = torch.from_numpy(np.random.randint(low=1, high=num_classes, size=[batch_size])).cuda()
            # false_indices = torch.fmod((indices + offsets).float(), float(num_classes)).long()
            # self._ones.resize_(batch_size, num_classes).fill_(smooth_negatives)
            # self._one_hot.scatter_(1, false_indices.view(-1,1), self._ones)

            # one_hot_labels = self._one_hot * 1.0

            return one_hot_labels 
Author: mattiasegu, Project: uncertainty_estimation_deep_learning, Lines: 54, Source: distributions.py
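
The commented-out block at the end hints at another use of torch.fmod: adding a random offset in [1, num_classes) to the true label and wrapping, which yields an off-target class guaranteed to differ from the true one. A minimal hedged sketch of that idea (names and sizes are illustrative):

import torch

num_classes = 10
indices = torch.tensor([0, 3, 9])
offsets = torch.randint(1, num_classes, indices.shape)      # offsets are never a multiple of num_classes
false_indices = torch.fmod(indices + offsets, num_classes)  # wraps around, never equals indices
assert not torch.any(false_indices == indices)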


Note: The torch.fmod method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please follow each project's license when distributing or using the code; do not reproduce without permission.