This article collects typical usage examples of the torch.nn.Threshold method in Python. If you are wondering what nn.Threshold does, how to call it, or how it is used in practice, the curated code samples below may help. You can also explore the containing module, torch.nn, for related functionality.
The following 11 code examples of nn.Threshold are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
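Before the examples, a minimal standalone sketch of the semantics: nn.Threshold(threshold, value) passes an element x through unchanged when x > threshold and replaces it with value otherwise.

import torch
from torch import nn

# y = x if x > threshold else value
m = nn.Threshold(0.0, 1e-6)
x = torch.tensor([-1.0, 0.0, 0.5, 2.0])
print(m(x))  # tensor([1.0000e-06, 1.0000e-06, 5.0000e-01, 2.0000e+00])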
Example 1: get_model
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def get_model(load_weights=True):
    deepsea_cpu = nn.Sequential(  # Sequential,
        nn.Conv2d(4, 320, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),  # ReLU-like: non-positive activations become 1e-06
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320, 480, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480, 960, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0), -1)),  # Reshape,
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(50880, 925)),  # Linear,
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(925, 919)),  # Linear,
        nn.Sigmoid(),
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    return nn.Sequential(ReCodeAlphabet(), deepsea_cpu)
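The repeated nn.Threshold(0, 1e-06) layers act as a ReLU variant whose non-positive activations become 1e-06 instead of exactly zero; together with the Sequential/Reshape/Linear comments, this pattern suggests the model was machine-converted from Lua Torch. A quick comparison of the two activations:

import torch
from torch import nn

x = torch.tensor([-0.5, 0.0, 0.7])
print(nn.Threshold(0, 1e-6)(x))  # tensor([1.0000e-06, 1.0000e-06, 7.0000e-01])
print(nn.ReLU()(x))              # tensor([0.0000, 0.0000, 0.7000])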
Example 2: get_seqpred_model
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def get_seqpred_model(load_weights=True):
    deepsea_cpu = nn.Sequential(  # Sequential,
        nn.Conv2d(4, 320, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(320, 480, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.MaxPool2d((1, 4), (1, 4)),
        nn.Dropout(0.2),
        nn.Conv2d(480, 960, (1, 8), (1, 1)),
        nn.Threshold(0, 1e-06),
        nn.Dropout(0.5),
        Lambda(lambda x: x.view(x.size(0), -1)),  # Reshape,
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(50880, 925)),  # Linear,
        nn.Threshold(0, 1e-06),
        nn.Sequential(Lambda(lambda x: x.view(1, -1) if 1 == len(x.size()) else x), nn.Linear(925, 919)),  # Linear,
        nn.Sigmoid(),
    )
    if load_weights:
        deepsea_cpu.load_state_dict(torch.load('model_files/deepsea_cpu.pth'))
    return nn.Sequential(ReCodeAlphabet(), ConcatenateRC(), deepsea_cpu, AverageRC())
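Judging by the names, this variant differs from get_model in Example 1 only in the wrappers: ConcatenateRC presumably feeds the model both the sequence and its reverse complement, and AverageRC averages the two predictions so the output is strand-symmetric.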
Example 3: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def __init__(self):
    super().__init__()
    self.fix_neg = nn.Threshold(0., 1e-8)  # replace non-positive entries with 1e-8
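A threshold of 0. with replacement value 1e-8 is a common way to keep a tensor strictly positive. Examples 10 and 11 below build the same layer (_fix_neg) so that non-negative matrix factorisation updates never produce exact zeros, which can break log and division terms in the beta-divergence.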
Example 4: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def __init__(self, num_resblocks, final_len):
    super(CAEP, self).__init__()
    self.num_resblocks = num_resblocks
    self.threshold = torch.Tensor([1e-4])
    self.prune = False
    # Encoder
    self.E_Conv_1 = conv_same(3, 32)  # 3,128,128 => 32,128,128
    self.E_PReLU_1 = nn.PReLU()
    self.E_Conv_2 = conv_downsample(32, 64)  # 32,128,128 => 64,64,64
    self.E_PReLU_2 = nn.PReLU()
    self.E_Conv_3 = conv_same(64, 128)  # 64,64,64 => 128,64,64
    self.E_PReLU_3 = nn.PReLU()
    self.E_Res = res_layers(128, num_blocks=self.num_resblocks)
    self.E_Conv_4 = conv_downsample(128, 64)  # 128,64,64 => 64,32,32
    self.E_Conv_5 = conv_downsample(64, 32)
    self.E_Conv_6 = conv_same(32, final_len)
    # nn.Threshold expects a scalar threshold, so unwrap the 1-element tensor.
    self.Pruner = nn.Threshold(self.threshold.item(), 0, inplace=True)
    # max_bpp = 32*16*16/128/128 * bits per int = 1 * bits per int
    # Decoder
    self.D_SubPix_00 = sub_pix(final_len, 32, 1)
    self.D_SubPix_0 = sub_pix(32, 64, 2)  # for fine tuning
    self.D_SubPix_1 = sub_pix(64, 128, 2)  # 64,32,32 => 128,64,64
    self.D_PReLU_1 = nn.PReLU()
    self.D_Res = res_layers(128, num_blocks=self.num_resblocks)
    self.D_SubPix_2 = sub_pix(128, 64, 1)  # 128,64,64 => 64,64,64
    self.D_PReLU_2 = nn.PReLU()
    self.D_SubPix_3 = sub_pix(64, 32, 2)  # 64,64,64 => 32,128,128
    self.D_PReLU_3 = nn.PReLU()
    self.D_SubPix_4 = sub_pix(32, 3, 1)  # 32,128,128 => 3,128,128
    self.tanh = nn.Tanh()
    self.__init_parameters__()
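The Pruner zeroes every latent entry at or below 1e-4, presumably so the code compresses better downstream. Note a side effect of nn.Threshold with value 0: all negative entries are zeroed too, which only preserves information if the latent code is non-negative. A small illustration:

import torch
from torch import nn

code = torch.tensor([0.5, 3e-5, -0.2, 2e-4])
pruner = nn.Threshold(1e-4, 0.0)
print(pruner(code))  # tensor([5.0000e-01, 0.0000e+00, 0.0000e+00, 2.0000e-04])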
Example 5: forward
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def forward(self, x):
    x = self.pool(F.relu(self.conv1(x)))
    x = self.pool(F.relu(self.conv2(x)))
    x = x.view(-1, 16 * 5 * 5)
    x = F.relu(self.fc1(x))
    x = F.relu(self.fc2(x))
    # x = nn.Threshold(0.2, 0.0)  # ActivationZeroThreshold(x)
    x = self.fc3(x)
    return x
Example 6: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def __init__(self, debias_model, tagging_model):
    super(JointModel, self).__init__()
    # TODO SHARING EMBEDDINGS FROM DEBIAS
    self.debias_model = debias_model
    self.tagging_model = tagging_model
    self.token_sm = nn.Softmax(dim=2)
    self.time_sm = nn.Softmax(dim=1)
    self.tok_threshold = nn.Threshold(
        ARGS.zero_threshold,
        -10000.0 if ARGS.sequence_softmax else 0.0)
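When ARGS.sequence_softmax is set, the replacement value is -10000.0 rather than 0.0: pushing sub-threshold logits to a large negative number before a softmax effectively removes their probability mass, a standard masking trick. A minimal sketch of the effect:

import torch
import torch.nn.functional as F

logits = torch.tensor([2.0, 0.1, -3.0])
masked = F.threshold(logits, 0.5, -10000.0)  # entries <= 0.5 become -10000.0
print(torch.softmax(masked, dim=0))          # ~ tensor([1., 0., 0.])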
Example 7: createScoreBranch
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def createScoreBranch(self):
    scoreBranch = nn.Sequential(
        nn.Dropout(0.5),
        nn.Conv2d(512, 1024, 1),
        nn.Threshold(0, 1e-6),  # ReLU-like activation that avoids exact zeros
        nn.Dropout(0.5),
        nn.Conv2d(1024, 1, 1),
    )
    return scoreBranch
Example 8: get_activation_fn
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def get_activation_fn(name):
    """PyTorch built-in activation functions."""
    activation_functions = {
        "linear": lambda: lambda x: x,
        "relu": nn.ReLU,
        "relu6": nn.ReLU6,
        "elu": nn.ELU,
        "prelu": nn.PReLU,
        "leaky_relu": nn.LeakyReLU,
        "threshold": nn.Threshold,
        "hardtanh": nn.Hardtanh,
        "sigmoid": nn.Sigmoid,
        "tanh": nn.Tanh,
        "log_sigmoid": nn.LogSigmoid,
        "softplus": nn.Softplus,
        "softshrink": nn.Softshrink,
        "softsign": nn.Softsign,
        "tanhshrink": nn.Tanhshrink,
    }
    if name not in activation_functions:
        raise ValueError(
            f"'{name}' is not a supported activation function. "
            f"Choose one of: {list(activation_functions.keys())}"
        )
    return activation_functions[name]
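A usage sketch for this factory: every entry is a callable that must be invoked once to get the activation, and nn.Threshold additionally needs its two constructor arguments:

relu = get_activation_fn("relu")()                  # nn.ReLU()
thresh = get_activation_fn("threshold")(0.0, 1e-6)  # nn.Threshold(0.0, 1e-6)
identity = get_activation_fn("linear")()            # plain lambda x: x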
Example 9: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def __init__(self):
    super(LayerThresholdTest, self).__init__()
    self.threshold = random.random()               # requires `import random`
    self.value = self.threshold + random.random()  # ensures value >= threshold
    self.thresh = nn.Threshold(self.threshold, self.value)
Example 10: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def __init__(self, V, rank, max_iterations=200000, tolerance=1e-8, test_conv=1000, gpu_id=0, seed=None,
             init_method='nndsvd', floating_point_precision='float', min_iterations=2000):
    """
    Run non-negative matrix factorisation on CPU (this variant keeps the CUDA
    calls commented out). Uses beta-divergence.

    Args:
        V: Matrix to be factorised
        rank: (int) number of latent dimensions to use in factorisation
        max_iterations: (int) Maximum number of update iterations to use during fitting
        tolerance: tolerance to use in convergence tests. Lower numbers give longer times to convergence
        test_conv: (int) How often to test for convergence
        gpu_id: (int) Which GPU device to use
        seed: random seed, if None (default) datetime is used
        init_method: how to initialise basis and coefficient matrices, options are:
            - random (will always be the same if seed != None)
            - NNDSVD
            - NNDSVDa (fill in the zero elements with the average)
            - NNDSVDar (fill in the zero elements with random values in the space [0:average/100])
        floating_point_precision: (string or type). Can be `double`, `float` or any type/string which
            torch can interpret.
        min_iterations: the minimum number of iterations to execute before termination. Useful when using
            fp32 tensors as convergence can happen too early.
    """
    # torch.cuda.set_device(gpu_id)
    if seed is None:
        seed = datetime.now().timestamp()  # requires `from datetime import datetime`
    if floating_point_precision == 'float':
        self._tensor_type = torch.FloatTensor
    elif floating_point_precision == 'double':
        self._tensor_type = torch.DoubleTensor
    else:
        self._tensor_type = floating_point_precision
    torch.manual_seed(seed)
    # torch.cuda.manual_seed(seed)
    self.max_iterations = max_iterations
    self.min_iterations = min_iterations
    # If V is not in a batch, put it in a batch of 1
    if len(V.shape) == 2:
        V = V[None, :, :]
    self._V = V.type(self._tensor_type)
    self._fix_neg = nn.Threshold(0., 1e-8)  # clamp non-positive entries to 1e-8
    self._tolerance = tolerance
    self._prev_loss = None
    self._iter = 0
    self._test_conv = test_conv
    # self._gpu_id = gpu_id
    self._rank = rank
    self._W, self._H = self._initialise_wh(init_method)
Example 11: __init__
# Required import: from torch import nn [as alias]
# Or: from torch.nn import Threshold [as alias]
def __init__(self, V, rank, max_iterations=200000, tolerance=1e-8, test_conv=1000, gpu_id=0, seed=None,
             init_method='nndsvd', floating_point_precision='float', min_iterations=2000):
    """
    Run non-negative matrix factorisation using GPU. Uses beta-divergence.

    Args:
        V: Matrix to be factorised
        rank: (int) number of latent dimensions to use in factorisation
        max_iterations: (int) Maximum number of update iterations to use during fitting
        tolerance: tolerance to use in convergence tests. Lower numbers give longer times to convergence
        test_conv: (int) How often to test for convergence
        gpu_id: (int) Which GPU device to use
        seed: random seed, if None (default) datetime is used
        init_method: how to initialise basis and coefficient matrices, options are:
            - random (will always be the same if seed != None)
            - NNDSVD
            - NNDSVDa (fill in the zero elements with the average)
            - NNDSVDar (fill in the zero elements with random values in the space [0:average/100])
        floating_point_precision: (string or type). Can be `double`, `float` or any type/string which
            torch can interpret.
        min_iterations: the minimum number of iterations to execute before termination. Useful when using
            fp32 tensors as convergence can happen too early.
    """
    torch.cuda.set_device(gpu_id)
    if seed is None:
        seed = datetime.now().timestamp()  # requires `from datetime import datetime`
    if floating_point_precision == 'float':
        self._tensor_type = torch.FloatTensor
    elif floating_point_precision == 'double':
        self._tensor_type = torch.DoubleTensor
    else:
        self._tensor_type = floating_point_precision
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    self.max_iterations = max_iterations
    self.min_iterations = min_iterations
    # If V is not in a batch, put it in a batch of 1
    if len(V.shape) == 2:
        V = V[None, :, :]
    self._V = V.type(self._tensor_type).cuda()
    self._fix_neg = nn.Threshold(0., 1e-8)  # clamp non-positive entries to 1e-8
    self._tolerance = tolerance
    self._prev_loss = None
    self._iter = 0
    self._test_conv = test_conv
    self._gpu_id = gpu_id
    self._rank = rank
    self._W, self._H = self._initialise_wh(init_method)
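A hypothetical usage sketch: the excerpt shows only the constructor, so the class name NMF, the _initialise_wh helper, and any fit/update loop are assumptions about the surrounding code.

import torch

V = torch.rand(50, 80)  # non-negative matrix to factorise
nmf = NMF(V, rank=10, max_iterations=20000,  # NMF is a hypothetical class name
          init_method='nndsvd', floating_point_precision='float')
# After construction, nmf._W (basis) and nmf._H (coefficients) hold the
# initialised factors; the fitting loop (not shown in this excerpt) refines them.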