This article collects typical usage examples of Python's scipy.stats.truncnorm. If you are wondering what stats.truncnorm does, how to call it, or what it looks like in real code, the curated examples below may help. You can also read further about its containing module, scipy.stats.
The following shows 15 code examples of stats.truncnorm, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
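Before the examples, here is a minimal sketch (not taken from any of the projects below; all values are illustrative) of the standard scipy.stats.truncnorm parameterization: the bounds a and b are expressed in standard-deviation units of the untruncated normal, i.e. a = (lower - loc) / scale and b = (upper - loc) / scale. This conversion recurs throughout the examples.

import numpy as np
from scipy.stats import truncnorm

lower, upper = 0.0, 10.0   # truncation bounds in data units (illustrative)
mu, sigma = 5.0, 2.0       # mean and std of the parent (untruncated) normal
a, b = (lower - mu) / sigma, (upper - mu) / sigma
dist = truncnorm(a, b, loc=mu, scale=sigma)
samples = dist.rvs(1000)   # 1000 draws, all within [lower, upper]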
Example 1: ellipse_track_list
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def ellipse_track_list(beam, n_t_sigma=3, num=1000, type="contour"):
    beam.sizes()
    #sigma_x = sqrt((sigma_e*tws0.Dx)**2 + emit*tws0.beta_x)
    #sigma_xp = sqrt((sigma_e*tws0.Dxp)**2 + emit*tws0.gamma_x)
    if type == "contour":
        t = np.linspace(0, 2*pi, num)
        x = n_t_sigma*beam.sigma_x*np.cos(t)
        y = n_t_sigma*beam.sigma_xp*np.sin(t)
    else:
        x = truncnorm(-n_t_sigma, n_t_sigma, loc=0, scale=beam.sigma_x).rvs(num)
        y = truncnorm(-n_t_sigma, n_t_sigma, loc=0, scale=beam.sigma_xp).rvs(num)
    tws0 = Twiss(beam)
    x_array, xp_array = phase_space_transform(x, y, tws0)
    track_list = []
    for x, y in zip(x_array + beam.x, xp_array + beam.xp):
        p = Particle(x=x, px=y, p=-0.0)
        pxy = Track_info(p, x, y)
        track_list.append(pxy)
    return track_list
Example 2: _get_dest_points
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def _get_dest_points(self, shape):
    n = shape[0]
    img_rows = shape[-3]
    img_cols = shape[-2]
    # source points
    src = [[[0, 0], [0, img_cols], [img_rows, 0], [img_rows, img_cols]] for _ in range(n)]
    if self.just_apply_noise:
        return src
    import scipy.stats as stats
    lower, upper = -img_rows/3, img_rows/3
    mu, sigma = FLAGS.transform_mean, FLAGS.transform_stddev
    X = stats.truncnorm(
        (lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
    # we will add this to the source points, i.e. these are random offsets
    # random = np.random.normal(FLAGS.transform_mean, FLAGS.transform_stddev, (n, 4, 2))
    random = X.rvs((n, 4, 2))
    return src + random
Example 3: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def __init__(self):
    super(MyIncept, self).__init__()
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)

    for m in self.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            import scipy.stats as stats
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.numel()))
            values = values.view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
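The initialization loop above (repeated in several Inception examples below) follows a common pattern: sample weights from a normal truncated at two standard deviations, then copy the values into each Conv2d/Linear weight tensor. A standalone, hedged sketch of that pattern on a single hypothetical layer (assuming torch and scipy are installed; the layer and stddev are illustrative):

import torch
import torch.nn as nn
import scipy.stats as stats

layer = nn.Linear(128, 64)                 # hypothetical layer standing in for a module
stddev = 0.1
X = stats.truncnorm(-2, 2, scale=stddev)   # truncated at +/- 2 standard deviations
values = torch.as_tensor(X.rvs(layer.weight.numel()), dtype=layer.weight.dtype)
with torch.no_grad():
    layer.weight.copy_(values.view(layer.weight.size()))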
Example 4: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def __init__(self):
    super(Inception3SpatialAdapter_6e, self).__init__()
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)

    for m in self.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            import scipy.stats as stats
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.numel()))
            values = values.view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
Example 5: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def __init__(self, mean, variance, lower, upper):
    if (mean is not None) and (variance is not None) and (lower is not None) and (upper is not None):
        meanParent = mean
        varianceParent = variance
        self.std = Gaussian(mean=0.0, variance=1.0)
        self.parent = Gaussian(mean=meanParent, variance=varianceParent)
        self.lower = lower
        self.upper = upper
        self.skewness = 0.0
        self.kurtosis = 0.0
        self.bounds = np.array([-np.inf, np.inf])
        self.beta = (self.upper - self.parent.mean) / np.sqrt(self.parent.variance)
        self.alpha = (self.lower - meanParent) / np.sqrt(varianceParent)
        self.x_range_for_pdf = np.linspace(self.lower, self.upper, RECURRENCE_PDF_SAMPLES)
        self.parents = truncnorm(a=self.alpha, b=self.beta, loc=meanParent, scale=np.sqrt(varianceParent))
        self.mean = self.parents.mean()
        self.variance = self.parents.var()
        self.sigma = np.sqrt(self.variance)
Example 6: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def __init__(self, img_feat, tags_idx, a_tags_idx, test_tags_idx, z_dim, vocab_processor):
    self.z_sampler = stats.truncnorm((-1 - 0.) / 1., (1 - 0.) / 1., loc=0., scale=1)
    self.length = len(tags_idx)
    self.current = 0
    self.img_feat = img_feat
    self.tags_idx = tags_idx
    self.a_tags_idx = a_tags_idx
    self.w_idx = np.arange(self.length)
    self.w_idx2 = np.arange(self.length)
    self.tmp = 0
    self.epoch = 0
    self.vocab_processor = vocab_processor
    self.vocab_size = len(vocab_processor._reverse_mapping)
    self.unk_id = vocab_processor._mapping['<UNK>']
    self.eos_id = vocab_processor._mapping['<EOS>']
    self.hair_id = vocab_processor._mapping['hair']
    self.eyes_id = vocab_processor._mapping['eyes']
    self.gen_info()
    self.test_tags_idx = self.gen_test_hot(test_tags_idx)
    self.fixed_z = self.next_noise_batch(len(self.test_tags_idx), z_dim)
    idx = np.random.permutation(np.arange(self.length))
    self.w_idx2 = self.w_idx2[idx]
Example 7: _calc_skewness_truncnorm
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def _calc_skewness_truncnorm(k_occurrence: np.ndarray) -> float:
    """Hubness measure; corrected for non-negativity of k-occurrence.

    Hubness as skewness of a truncated normal distribution
    estimated from the k-occurrence histogram.

    Parameters
    ----------
    k_occurrence: ndarray
        Reverse nearest neighbor count for each object.
    """
    clip_left = 0
    clip_right = np.iinfo(np.int64).max
    k_occurrence_mean = k_occurrence.mean()
    k_occurrence_std = k_occurrence.std(ddof=1)
    a = (clip_left - k_occurrence_mean) / k_occurrence_std
    b = (clip_right - k_occurrence_mean) / k_occurrence_std
    skew_truncnorm = stats.truncnorm(a, b).moment(3)
    return skew_truncnorm
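A hedged usage sketch for the function above, on a synthetic k-occurrence array (assumes the function and its scipy.stats import are in scope; the Poisson-distributed counts are purely illustrative; larger skewness values are read as stronger hubness):

import numpy as np
import scipy.stats as stats

k_occurrence = np.random.poisson(lam=10, size=1000)   # fake reverse-neighbor counts
print(_calc_skewness_truncnorm(k_occurrence))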
Example 8: discrete_tnorm
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def discrete_tnorm(a, b, tgt_loc, sigma=1, n_steps=100):
    def phi(zeta):
        return 1 / (np.sqrt(2 * np.pi)) * np.exp(-0.5 * zeta**2)

    def Phi(x):
        return 0.5 * (1 + erf(x / np.sqrt(2)))

    def tgt_loc_update(x):
        y1 = phi((a - x) / sigma)
        y2 = phi((b - x) / sigma)
        x1 = Phi((b - x) / sigma)
        x2 = Phi((a - x) / sigma)
        denom = x1 - x2 + 1E-4
        return y1 / denom - y2 / denom

    x = tgt_loc
    direction = np.sign(tgt_loc - (b - a))
    for _ in range(n_steps):
        x = tgt_loc - sigma * tgt_loc_update(x)
    tn = truncnorm((a - x) / sigma, (b - x) / sigma, loc=x, scale=sigma)
    rrange = np.arange(a, b + 1)
    pmf = tn.pdf(rrange)
    pmf /= np.sum(pmf)
    return pmf
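Note that discrete_tnorm also relies on erf (e.g. from scipy.special import erf), numpy as np, and truncnorm being imported at module level. A hedged usage sketch, assuming the function above is in scope and with illustrative arguments: build a pmf over the integers 0..20 centred near 15, then sample from it.

import numpy as np

pmf = discrete_tnorm(0, 20, tgt_loc=15, sigma=3)        # pmf over np.arange(0, 21)
draws = np.random.choice(np.arange(0, 21), size=5, p=pmf)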
Example 9: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
    super(Inception3, self).__init__()
    self.aux_logits = aux_logits
    self.transform_input = transform_input
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    if aux_logits:
        self.AuxLogits = InceptionAux(768, num_classes)
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    self.fc = nn.Linear(2048, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            import scipy.stats as stats
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.numel()))
            values = values.view(m.weight.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
Example 10: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def __init__(self, config_channels, anchors, num_cls, transform_input=False):
    nn.Module.__init__(self)
    self.transform_input = transform_input
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    # aux_logits
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    self.conv = nn.Conv2d(2048, model.output_channels(len(anchors), num_cls), 1)

    for m in self.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.data.numel()))
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()

    if config_channels.config.getboolean('model', 'pretrained'):
        url = _model.model_urls['inception_v3_google']
        logging.info('use pretrained model: ' + url)
        state_dict = self.state_dict()
        for key, value in torch.utils.model_zoo.load_url(url).items():
            if key in state_dict:
                state_dict[key] = value
        self.load_state_dict(state_dict)
Example 11: _initialize_weights
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def _initialize_weights(self):
    for m in self.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            import scipy.stats as stats
            X = stats.truncnorm(-2, 2, scale=0.01)
            values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype)
            values = values.view(m.weight.size())
            with torch.no_grad():
                m.weight.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)
Example 12: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def __init__(self, num_channels, num_classes=400):
    super(Inception3, self).__init__()
    self.num_classes = num_classes
    self.Conv2d_1a_3x3 = BasicConv2d(num_channels, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    self.dropout = nn.Dropout(0.60)
    if num_classes != 1000:
        self.fc_kin = nn.Linear(2048, self.num_classes)
        print('Building Final Classification layer with ', num_classes, 'classes')
    else:
        self.fc = nn.Linear(2048, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            import scipy.stats as stats
            stddev = m.stddev if hasattr(m, 'stddev') else 0.02
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.data.numel()))
            values = values.view(m.weight.data.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example 13: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def __init__(self, output_channels=1000, aux_logits=True, transform_input=False, **kwargs):
    super(Inception3, self).__init__()
    if self.training and aux_logits == False:
        logging.warning('Enable AUX_LOGITS in Inception_v3 at training time for accurate training!')
    self.expected_input_size = (299, 299)
    self.aux_logits = aux_logits
    self.transform_input = transform_input
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    if aux_logits:
        self.AuxLogits = InceptionAux(768, output_channels)
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    self.fc = nn.Linear(2048, output_channels)

    for m in self.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            import scipy.stats as stats
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.data.numel()))
            values = values.view(m.weight.data.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
Example 14: tgauss
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def tgauss(mu=0, sigma=1, trunc=2, num=1):
    lower = -sigma*trunc
    upper = sigma*trunc
    X = stats.truncnorm((lower - mu) / sigma, (upper - mu) / sigma, loc=mu, scale=sigma)
    if num == 1:
        x = X.rvs(1)[0]
    else:
        x = X.rvs(num)
    return x
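A hedged usage sketch for tgauss, assuming the function above and its scipy.stats import are in scope; the arguments are illustrative:

single = tgauss(mu=0.0, sigma=0.5, trunc=2)           # one float in [-1.0, 1.0]
batch = tgauss(mu=0.0, sigma=0.5, trunc=2, num=8)     # array of 8 values in [-1.0, 1.0]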
Example 15: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import truncnorm [as alias]
def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
    super(Inception3, self).__init__()
    self.aux_logits = aux_logits
    self.transform_input = transform_input
    self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
    self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
    self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
    self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
    self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
    self.Mixed_5b = InceptionA(192, pool_features=32)
    self.Mixed_5c = InceptionA(256, pool_features=64)
    self.Mixed_5d = InceptionA(288, pool_features=64)
    self.Mixed_6a = InceptionB(288)
    self.Mixed_6b = InceptionC(768, channels_7x7=128)
    self.Mixed_6c = InceptionC(768, channels_7x7=160)
    self.Mixed_6d = InceptionC(768, channels_7x7=160)
    self.Mixed_6e = InceptionC(768, channels_7x7=192)
    if aux_logits:
        self.AuxLogits = InceptionAux(768, num_classes)
    self.Mixed_7a = InceptionD(768)
    self.Mixed_7b = InceptionE(1280)
    self.Mixed_7c = InceptionE(2048)
    self.fc = nn.Linear(2048, num_classes)

    for m in self.modules():
        if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
            import scipy.stats as stats
            stddev = m.stddev if hasattr(m, 'stddev') else 0.1
            X = stats.truncnorm(-2, 2, scale=stddev)
            values = torch.Tensor(X.rvs(m.weight.data.numel()))
            values = values.view(m.weight.data.size())
            m.weight.data.copy_(values)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()