This article collects typical usage examples of the Python method torch.set_printoptions. If you have been wondering what torch.set_printoptions actually does, how to call it, or what it looks like in real code, the curated samples below may help. You can also look further into usage examples of the torch package it belongs to.
The following shows 15 code examples of torch.set_printoptions, sorted roughly by popularity.
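Before the project code, here is a minimal, self-contained sketch (not taken from any of the projects below) of what torch.set_printoptions controls: floating-point precision, the summarization threshold, scientific notation, and the predefined printing profiles.

import torch

x = torch.rand(100, 100)

# More decimal digits, no scientific notation.
torch.set_printoptions(precision=8, sci_mode=False)
print(x[0, :3])

# Print every element instead of the summarized "..." view.
torch.set_printoptions(threshold=float('inf'))
print(x)

# Restore the library defaults (precision=4, threshold=1000, ...).
torch.set_printoptions(profile="default")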
Example 1: plot_att_change
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def plot_att_change(batch_doc, network, record, save_img_path, uid='temp',
                    epoch=0, device=torch.device('cpu'), word_alphabet=None, show_net=False, graph_types=['coref']):
    char, word, posi, labels, feats, adjs = [batch_doc[i].to(device) for i in
                                             ["chars", "word_ids", "posi", "ner_ids", "feat_ids", "adjs"]]
    word_txt = []
    if word_alphabet:
        doc = word[0][word[0] != PAD_ID_WORD]
        word_txt = [word_alphabet.get_instance(w) for w in doc]
    adjs_cp = adjs.clone()
    # save adj to file
    print_thres = adjs.size(-1) * adjs.size(-2) + 1000
    torch.set_printoptions(threshold=print_thres)
    # check adj_old, adj_new
    # select = plot_att(adjs_cp, word_txt, record, epoch=epoch)
    network.loss(None, word, char, adjs_cp, labels, show_net=show_net, graph_types=graph_types)
    # plot_att(adjs_cp, word_txt, record, epoch=epoch, select=select)
Example 2: decide
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def decide(self, prev_output_tokens, encoder_out, context_size):
    torch.set_printoptions(precision=1)
    # source embeddings
    src_emb = encoder_out['ctrl_encoder_out'][:, :context_size]  # B, Ts, ds
    # target embeddings:
    positions = self.ctrl_embed_positions(
        prev_output_tokens,
        incremental_state=None,
    ) if self.ctrl_embed_positions is not None else None
    # Build the full grid
    tgt_emb = self.embed_scale * self.ctrl_embed_tokens(prev_output_tokens)
    if positions is not None:
        tgt_emb += positions
    tgt_emb = self.embedding_dropout(tgt_emb)
    src_length = src_emb.size(1)
    tgt_length = tgt_emb.size(1)
    # build 2d "image" of embeddings
    src_emb = _expand(src_emb, 1, tgt_length)  # B, Tt, Ts, ds
    tgt_emb = _expand(tgt_emb, 2, src_length)  # B, Tt, Ts, dt
    x = torch.cat((src_emb, tgt_emb), dim=3)   # B, Tt, Ts, C=ds+dt
    obs = self.controller_feat(x)
    controls = self.controller.predict_read_write(obs)
    pwrite = torch.exp(controls[:, -1, -1, 1])
    return pwrite
Example 3: decide
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def decide(self, prev_output_tokens, encoder_out, context_size):
    torch.set_printoptions(precision=2)
    # source embeddings
    src_emb = encoder_out['encoder_out'][:, :context_size]  # B, Ts, ds
    # target embeddings:
    positions = self.embed_positions(
        prev_output_tokens,
        incremental_state=None,
    ) if self.embed_positions is not None else None
    # Build the full grid
    tgt_emb = self.embed_scale * self.embed_tokens(prev_output_tokens)
    if positions is not None:
        tgt_emb += positions
    tgt_emb = self.embedding_dropout(tgt_emb)
    src_length = src_emb.size(1)
    tgt_length = tgt_emb.size(1)
    # build 2d "image" of embeddings
    src_emb = _expand(src_emb, 1, tgt_length)  # B, Tt, Ts, ds
    tgt_emb = _expand(tgt_emb, 2, src_length)  # B, Tt, Ts, dt
    x = torch.cat((src_emb, tgt_emb), dim=3)   # B, Tt, Ts, C=ds+dt
    obs = self.controller_feat(x)
    controls = self.controller.predict_read_write(obs)
    pwrite = torch.exp(controls[:, -1, -1, 1])
    return pwrite
Example 4: decode
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def decode(fn, sound_path, exe_path, scp_path, out_dir):
    """
    Takes a filepath and prints out the corresponding shell command to run that specific
    kaldi configuration. It also calls compliance.kaldi and prints the two outputs.

    Example:
    >> fn = 'fbank-1.1009-2.5985-1.1875-0.8750-5723-true-918-4-0.31-true-false-true-true-' \
            'false-false-false-true-4595-4281-1.0000-hamming.ark'
    >> decode(fn)
    """
    out_fn = out_dir + fn
    fn = fn[len('fbank-'):-len('.ark')]
    arr = [
        'blackman_coeff', 'energy_floor', 'frame_length', 'frame_shift', 'high_freq', 'htk_compat',
        'low_freq', 'num_mel_bins', 'preemphasis_coefficient', 'raw_energy', 'remove_dc_offset',
        'round_to_power_of_two', 'snip_edges', 'subtract_mean', 'use_energy', 'use_log_fbank',
        'use_power', 'vtln_high', 'vtln_low', 'vtln_warp', 'window_type']
    fn_split = fn.split('-')
    assert len(fn_split) == len(arr), ('Len mismatch: %d and %d' % (len(fn_split), len(arr)))
    inputs = {arr[i]: utils.parse(fn_split[i]) for i in range(len(arr))}
    # print flags for C++
    s = ' '.join(['--' + arr[i].replace('_', '-') + '=' + fn_split[i] for i in range(len(arr))])
    logging.info(exe_path + ' --dither=0.0 --debug-mel=true ' + s + ' ' + scp_path + ' ' + out_fn)
    logging.info('')
    # print args for python
    inputs['dither'] = 0.0
    logging.info(inputs)
    sound, sample_rate = torchaudio.load_wav(sound_path)
    kaldi_output_dict = {k: v for k, v in torchaudio.kaldi_io.read_mat_ark(out_fn)}
    res = torchaudio.compliance.kaldi.fbank(sound, **inputs)
    torch.set_printoptions(precision=10, sci_mode=False)
    logging.info(res)
    logging.info(kaldi_output_dict['my_id'])
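The precision=10, sci_mode=False call above makes the torchaudio and Kaldi outputs easy to compare line by line in the logs. A stand-alone illustration of that effect, using toy tensors rather than the Kaldi pipeline:

import torch

a = torch.tensor([1.00001234, 2.5])
b = torch.tensor([1.00005678, 2.5])

# Ten decimal digits and no scientific notation, so small numeric
# differences between the two tensors stay visible in the printed output.
torch.set_printoptions(precision=10, sci_mode=False)
print(a)
print(b)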
Example 5: forward
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def forward(self, input):
    if not self.training or self.keep_prob == 1:
        return input
    gamma = (1. - self.keep_prob) / self.block_size ** 2
    for sh in input.shape[2:]:
        gamma *= sh / (sh - self.block_size + 1)
    M = torch.bernoulli(torch.ones_like(input) * gamma)
    Msum = F.conv2d(M,
                    torch.ones((input.shape[1], 1, self.block_size, self.block_size)).to(device=input.device,
                                                                                         dtype=input.dtype),
                    padding=self.block_size // 2,
                    groups=input.shape[1])
    torch.set_printoptions(threshold=5000)
    mask = (Msum < 1).to(device=input.device, dtype=input.dtype)
    return input * mask * mask.numel() / mask.sum()  # TODO input * mask * self.keep_prob ?
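The torch.set_printoptions(threshold=5000) call above only matters when the intermediate mask tensors are printed while debugging. A quick way to see the effect on its own, outside this module:

import torch

m = torch.zeros(64, 64)                     # 4096 elements
print(m)                                    # summarized with "...": 4096 > default threshold of 1000
torch.set_printoptions(threshold=5000)
print(m)                                    # printed in full: 4096 < 5000
torch.set_printoptions(profile="default")   # restore defaults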
Example 6: write_off
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def write_off(data, path):
    r"""Writes a :class:`torch_geometric.data.Data` object to an OFF (Object
    File Format) file.

    Args:
        data (:class:`torch_geometric.data.Data`): The data object.
        path (str): The path to the file.
    """
    num_nodes, num_faces = data.pos.size(0), data.face.size(1)

    pos = data.pos.to(torch.float)
    face = data.face.t()
    num_vertices = torch.full((num_faces, 1), face.size(1), dtype=torch.long)
    face = torch.cat([num_vertices, face], dim=-1)

    threshold = PRINT_OPTS.threshold
    torch.set_printoptions(threshold=float('inf'))

    pos_repr = re.sub(',', '', _tensor_str(pos, indent=0))
    pos_repr = '\n'.join([x[2:-1] for x in pos_repr.split('\n')])[:-1]

    face_repr = re.sub(',', '', _tensor_str(face, indent=0))
    face_repr = '\n'.join([x[2:-1] for x in face_repr.split('\n')])[:-1]

    with open(path, 'w') as f:
        f.write('OFF\n{} {} 0\n'.format(num_nodes, num_faces))
        f.write(pos_repr)
        f.write('\n')
        f.write(face_repr)
        f.write('\n')

    torch.set_printoptions(threshold=threshold)
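For context, a hypothetical call of write_off could look like the following. The toy mesh (a single triangle) and the output path are made up for illustration; the snippet assumes torch_geometric is installed and that write_off's own imports (re, plus PRINT_OPTS and _tensor_str from torch._tensor_str) are in scope.

import torch
from torch_geometric.data import Data

pos = torch.tensor([[0.0, 0.0, 0.0],
                    [1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0]])       # three vertices
face = torch.tensor([[0], [1], [2]])        # one triangular face, shape [3, num_faces]
write_off(Data(pos=pos, face=face), 'triangle.off')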
Example 7: setUp
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def setUp(self):
    torch.set_printoptions(linewidth=160, threshold=1e3)
    seed = 7
    np.random.seed(1234)
    seed = np.random.randint(1e5)
    torch.manual_seed(seed)
    self.eps = 1e-4
Example 8: get_attn_adj_mask
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def get_attn_adj_mask(adjs):
    adjs_mask = adjs.ne(0)  # batch*n_node*n_node
    # torch.set_printoptions(precision=None, threshold=float('inf'))
    # pdb.set_trace()
    n_neig = adjs_mask.sum(dim=2)
    adjs_mask[:, :, 0] += n_neig.eq(0)  # this is for making PAD not all zeros
    return adjs_mask.eq(0)
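A toy illustration of the returned mask (my own example, not from the project): True marks positions attention should ignore, and a row with no neighbours keeps its first column attendable so PAD rows are not masked out entirely.

import torch

adjs = torch.tensor([[[0., 1.],
                      [0., 0.]]])           # batch of 1, a 2x2 adjacency matrix
mask = get_attn_adj_mask(adjs)
print(mask)
# Row 0 attends only to node 1 (its sole neighbour); row 1 has no neighbours,
# so only its first column is left unmasked.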
Example 9: set_printoptions
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def set_printoptions(
    precision=None, threshold=None, edgeitems=None, linewidth=None, profile=None, sci_mode=None
):
    """
    Configures the printing options. List of items shamelessly taken from NumPy and PyTorch (thanks guys!).

    Parameters
    ----------
    precision: int
        Number of digits of precision for floating point output (default=4).
    threshold: int
        Total number of array elements which trigger summarization rather than full `repr` string (default=1000).
    edgeitems: int
        Number of array items in summary at beginning and end of each dimension (default=3).
    linewidth: int
        The number of characters per line for the purpose of inserting line breaks (default=80).
    profile: str
        Sane defaults for pretty printing. Can override with any of the above options. Can be any one of
        `default`, `short`, `full`.
    sci_mode: bool
        Enable (True) or disable (False) scientific notation. If None (default) is specified, the value is
        automatically inferred by HeAT.
    """
    torch.set_printoptions(precision, threshold, edgeitems, linewidth, profile, sci_mode)

    # HeAT profiles will print a bit wider than PyTorch does
    if profile == "default" and linewidth is None:
        torch._tensor_str.PRINT_OPTS.linewidth = _DEFAULT_LINEWIDTH
    elif profile == "short" and linewidth is None:
        torch._tensor_str.PRINT_OPTS.linewidth = _DEFAULT_LINEWIDTH
    elif profile == "full" and linewidth is None:
        torch._tensor_str.PRINT_OPTS.linewidth = _DEFAULT_LINEWIDTH
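A hypothetical use of this wrapper (assuming it is imported from HeAT's printing module and that _DEFAULT_LINEWIDTH is defined there, as in the snippet above):

set_printoptions(precision=2, sci_mode=False)   # override only what is passed
set_printoptions(profile="full")                # full repr with HeAT's wider line width
set_printoptions(profile="default")             # back to the defaults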
Example 10: unit_train
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def unit_train(self, data):
    xs, ys, frame_lens, label_lens, filenames, _ = data
    try:
        batch_size = xs.size(0)
        if self.use_cuda:
            xs = xs.cuda(non_blocking=True)
        ys_hat, frame_lens = self.model(xs, frame_lens)
        if self.fp16:
            ys_hat = ys_hat.float()
        ys_hat = ys_hat.transpose(0, 1).contiguous()  # TxNxH
        #torch.set_printoptions(threshold=5000000)
        #print(ys_hat.shape, frame_lens, ys.shape, label_lens)
        #print(onehot2int(ys_hat).squeeze(), ys)
        loss = self.loss(ys_hat, ys, frame_lens, label_lens)
        if torch.isnan(loss) or loss.item() == float("inf") or loss.item() == -float("inf"):
            logger.warning("received an nan/inf loss: probably frame_lens < label_lens or the learning rate is too high")
            #raise RuntimeError
            return None
        if frame_lens.cpu().lt(2*label_lens).nonzero().numel():
            logger.debug("the batch includes a data with frame_lens < 2*label_lens: set loss to zero")
            loss.mul_(0)
        loss_value = loss.item()
        self.optimizer.zero_grad()
        if self.fp16:
            #self.optimizer.backward(loss)
            #self.optimizer.clip_master_grads(self.max_norm)
            with self.optimizer.scale_loss(loss) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
            nn.utils.clip_grad_norm_(self.model.parameters(), self.max_norm)
        self.optimizer.step()
        if self.use_cuda:
            torch.cuda.synchronize()
        del loss
        return loss_value
    except Exception as e:
        print(e)
        print(filenames, frame_lens, label_lens)
        raise
Example 11: decide
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def decide(self, src_tokens, prev_output_tokens, writing_grid):
    # torch.set_printoptions(precision=2)
    if not self.share_embeddings:
        x = self.observation_grid(src_tokens,
                                  prev_output_tokens)
    else:
        x = writing_grid
    # Cumulative ResNet:
    x = self.net(x)
    # Cell aggregation
    x = x[:, -1, -1]
    # The R/W decisions:
    x = torch.sigmoid(self.gate(x)).squeeze(-1)  # p(read)
    return 1 - x
Example 12: decide
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def decide(self, x):
    torch.set_printoptions(precision=2)
    # Final LN
    if self.final_ln is not None:
        x = self.final_ln(x)
    # Aggregate
    x, _ = self.aggregator(x)
    x = x[:, -1, -1]
    # A stack of linear layers
    x = self.net(x)
    # The R/W decisions:
    x = torch.sigmoid(self.gate(x)).squeeze(-1)  # p(read)
    return 1 - x
Example 13: setUp
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def setUp(self, seed=1234):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Set pytorch print precision
    torch.set_printoptions(precision=10)
Example 14: forward
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def forward(self, input):
    if not self.training or self.keep_prob == 1:
        return input
    gamma = (1. - self.keep_prob) / self.block_size ** 2
    for sh in input.shape[2:]:
        gamma *= sh / (sh - self.block_size + 1)
    M = torch.bernoulli(torch.ones_like(input) * gamma)
    Msum = F.conv2d(M,
                    torch.ones((input.shape[1], 1, self.block_size, self.block_size)).to(device=input.device,
                                                                                         dtype=input.dtype),
                    padding=self.block_size // 2,
                    groups=input.shape[1])
    torch.set_printoptions(threshold=5000)
    mask = (Msum < 1).to(device=input.device, dtype=input.dtype)
    return input * mask * mask.numel() / mask.sum()  # TODO input * mask * self.keep_prob ?
Example 15: forward
# Required import: import torch [as alias]
# Or: from torch import set_printoptions [as alias]
def forward(self, input, input_hidden, vocab, vocab_rev, decode_steps_t, graphs):
    all_outputs, all_words = [], []
    decoder_input = torch.tensor([vocab_rev['<s>']] * input.size(0)).cuda()
    decoder_hidden = input_hidden.unsqueeze(0)
    torch.set_printoptions(profile="full")
    for di in range(self.max_decode_steps):
        ret_decoder_output, decoder_output, decoder_hidden = self.decoder(decoder_input, decoder_hidden, input, graphs)
        if self.k == 1:
            all_outputs.append(ret_decoder_output)
            dec_objs = []
            for i in range(decoder_output.shape[0]):
                dec_probs = F.softmax(ret_decoder_output[i][graphs[i]], dim=0)
                idx = dec_probs.multinomial(1)
                graph_list = graphs[i].nonzero().cpu().numpy().flatten().tolist()
                assert len(graph_list) == dec_probs.numel()
                dec_objs.append(graph_list[idx])
            topi = torch.LongTensor(dec_objs).cuda()
            # dec_probs = self.softmax(decoder_output)
            # topi = dec_probs.multinomial(num_samples=1)
            # topi = self.softmax(decoder_output).topk(1)[1]
            decoder_input = topi.squeeze().detach()
            all_words.append(topi)
        else:
            topv, topi = decoder_output.topk(self.k)
            topv = self.softmax(topv)
            topv = topv.cpu().numpy()
            topi = topi.cpu().numpy()
            cur_objs = []
            for i in range(graphs.size(0)):
                cur_obj = np.random.choice(topi[i].reshape(-1), p=topv[i].reshape(-1))
                cur_objs.append(cur_obj)
            decoder_input = torch.LongTensor(cur_objs).cuda()
            all_words.append(decoder_input)
            all_outputs.append(decoder_output)
    return torch.stack(all_outputs), torch.stack(all_words)