This article collects typical usage examples of numpy.row_stack in Python. If you are wondering what numpy.row_stack does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the numpy module.
The following shows 15 code examples of numpy.row_stack, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
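Before the examples, a quick self-contained illustration (not taken from any project below): numpy.row_stack stacks arrays vertically, treating 1-D inputs as rows, and behaves like numpy.vstack (recent NumPy releases document it as an alias of vstack).

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])

stacked = np.row_stack((a, b))   # same result as np.vstack((a, b))
print(stacked)
# [[1 2 3]
#  [4 5 6]]
print(stacked.shape)             # (2, 3)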
Example 1: normalize
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def normalize(data, bounds, reverse=False, return_bounds=False):
    from pymoo.util.normalization import normalize as _normalize

    _F = np.row_stack([e[0] for e in data])
    if bounds is None:
        bounds = (_F.min(axis=0), _F.max(axis=0))

    to_plot = []
    for k in range(len(data)):
        F = _normalize(data[k][0], bounds[0], bounds[1])

        if reverse:
            F = 1 - F

        to_plot.append([F, data[k][1]])

    if return_bounds:
        return to_plot, bounds
    else:
        return to_plot
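A minimal sketch of the core pattern in this example, using hypothetical toy data and plain arithmetic in place of pymoo's _normalize: all objective matrices are row-stacked so shared bounds can be computed, and each set is then scaled into [0, 1].

import numpy as np

# hypothetical input: two sets of objective values, each paired with plotting kwargs
data = [(np.random.random((10, 3)), {"color": "red"}),
        (np.random.random((10, 3)), {"color": "blue"})]

_F = np.row_stack([e[0] for e in data])      # all points in one matrix
xl, xu = _F.min(axis=0), _F.max(axis=0)      # shared bounds per objective

to_plot = [[(F - xl) / (xu - xl), kwargs] for F, kwargs in data]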
Example 2: _do
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def _do(self):

    # initialize a figure with a single plot
    self.init_figure()

    # equal axis length and no ticks
    equal_axis(self.ax)
    no_ticks(self.ax)

    # determine the overall scale of points
    _F = np.row_stack([e[0] for e in self.to_plot])
    _min, _max = _F.min(axis=0), _F.max(axis=0)

    V = get_uniform_points_around_circle(self.n_dim)

    plot_axes_arrow(self.ax, V, extend_factor=self.axis_extension, **{**self.axis_style, **self.arrow_style})
    plot_axis_labels(self.ax, V, self.get_labels(), **self.axis_label_style)

    # normalize in range for this plot - here no implicit normalization as in radviz
    bounds = parse_bounds(self.bounds, self.n_dim)
    to_plot_norm = normalize(self.to_plot, bounds)

    for k, (F, kwargs) in enumerate(to_plot_norm):
        N = (F[..., None] * V).sum(axis=1)
        self.ax.scatter(N[:, 0], N[:, 1], **kwargs)
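Here np.row_stack only gathers every point set so a common scale can be determined; the projection onto the star-coordinate axes can be sketched with plain numpy (V and F below are toy placeholders, not pymoo's actual axis layout):

import numpy as np

n_dim = 3
# toy axis end points on the unit circle (stand-in for get_uniform_points_around_circle)
angles = 2 * np.pi * np.arange(n_dim) / n_dim
V = np.column_stack([np.cos(angles), np.sin(angles)])   # shape (n_dim, 2)

F = np.random.random((5, n_dim))             # already-normalized objective values
N = (F[..., None] * V).sum(axis=1)           # each point projected to 2-D, shape (5, 2)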
Example 3: _do
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def _do(self, problem, X, **kwargs):

    # get the X of parents and count the matings
    _, n_matings, n_var = X.shape

    # start point of crossover
    r = np.row_stack([np.random.permutation(n_var - 1) + 1 for _ in range(n_matings)])[:, :self.n_points]
    r.sort(axis=1)
    r = np.column_stack([r, np.full(n_matings, n_var)])

    # the mask to do the crossover
    M = np.full((n_matings, n_var), False)

    # create for each individual the crossover range
    for i in range(n_matings):
        j = 0
        while j < r.shape[1] - 1:
            a, b = r[i, j], r[i, j + 1]
            M[i, a:b] = True
            j += 2

    _X = crossover_mask(X, M)

    return _X
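A hedged sketch of how row_stack builds the random cut points for this n-point crossover, with toy sizes instead of pymoo's problem and crossover objects:

import numpy as np

n_matings, n_var, n_points = 4, 10, 2

# one random set of cut positions per mating, stacked into an (n_matings, n_points) matrix
r = np.row_stack([np.random.permutation(n_var - 1) + 1 for _ in range(n_matings)])[:, :n_points]
r.sort(axis=1)
r = np.column_stack([r, np.full(n_matings, n_var)])

# boolean crossover mask built from consecutive pairs of cut points, as in the loop above
M = np.full((n_matings, n_var), False)
for i in range(n_matings):
    for j in range(0, r.shape[1] - 1, 2):
        M[i, r[i, j]:r[i, j + 1]] = True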
Example 4: _do
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def _do(self):
    rnd = sample_on_unit_simplex(self.n_sample_points, self.n_dim, unit_simplex_mapping=self.sampling)

    def h(n):
        return get_partition_closest_to_points(n, self.n_dim)

    H = h(self.n_points)

    E = get_reference_directions("das-dennis", self.n_dim, n_partitions=H)
    E = E[np.any(E == 0, axis=1)]

    # add the edge coordinates
    X = np.row_stack([E, rnd])

    I = select_points_with_maximum_distance(X, self.n_points, selected=list(range(len(E))))

    centroids = X[I].copy()

    if self.kmeans:
        # centroids = kmeans(X, centroids, self.kmeans_max_iter, self.kmeans_a_tol, 0)
        centroids = kmeans(X, centroids, self.kmeans_max_iter, self.kmeans_a_tol, len(E))

    return centroids
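The row_stack line merges structured boundary directions with random samples from the unit simplex; a simplified stand-in for that merge (np.random.dirichlet and the simplex corners replace pymoo's samplers here):

import numpy as np

n_dim, n_sample_points = 3, 100

rnd = np.random.dirichlet(np.ones(n_dim), size=n_sample_points)  # random points on the unit simplex
E = np.eye(n_dim)                                                # boundary points (simplex corners)

X = np.row_stack([E, rnd])     # boundary points first, random samples appended
print(X.shape)                 # (103, 3)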
Example 5: mean_mean
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def mean_mean(z):
    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.row_stack([z, row])

    n_points, n_dim = z.shape

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, np.inf)

    k = n_dim - 1
    I = D.argsort(axis=1)[:, :k]

    first = np.column_stack([np.arange(n_points) for _ in range(k)])
    val = np.mean(D[first, I], axis=1)

    return val.mean()
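A small, self-contained sketch of the first loop: any unit vector missing from z is appended one row at a time with np.row_stack (z is toy data here).

import numpy as np

z = np.array([[0.2, 0.8],
              [1.0, 0.0]])     # already contains the first unit vector

for row in np.eye(z.shape[1]):
    if not np.any(np.all(row == z, axis=1)):
        z = np.row_stack([z, row])

print(z)
# [[0.2 0.8]
#  [1.  0. ]
#  [0.  1. ]]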
Example 6: calc_pareto_front
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def calc_pareto_front(problem, ref_dirs):
    n_pareto_points = 200
    np.random.seed(1)

    pf = problem.pareto_front(n_pareto_points=n_pareto_points, use_cache=False)

    # survival = ReferenceDirectionSurvival(ref_dirs)
    survival = RankAndCrowdingSurvival()

    for i in range(1000):
        _pf = problem.pareto_front(n_pareto_points=n_pareto_points, use_cache=False)
        F = np.row_stack([pf, _pf])

        pop = Population().new("F", F)
        pop = survival.do(problem, pop, n_pareto_points // 2)

        pf = pop.get("F")

    return pf
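This helper depends on pymoo's survival operators; the underlying idea, merging the running front with new candidates via row_stack and pruning, can be sketched in plain numpy with a naive non-dominated filter (assuming minimization; the filter below is an illustrative stand-in, not RankAndCrowdingSurvival):

import numpy as np

def non_dominated(F):
    # keep row i only if no other row dominates it (all components <= and at least one <)
    keep = np.ones(len(F), dtype=bool)
    for i in range(len(F)):
        dominates_i = np.all(F <= F[i], axis=1) & np.any(F < F[i], axis=1)
        keep[i] = not dominates_i.any()
    return F[keep]

pf = np.random.random((50, 2))
for _ in range(5):
    _pf = np.random.random((50, 2))
    F = np.row_stack([pf, _pf])    # merge the running front with new candidates
    pf = non_dominated(F)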
Example 7: test_fermi_energy_spin_resolved_even_kpoints
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def test_fermi_energy_spin_resolved_even_kpoints(self):
    """ This is to test the determination of Fermi level in spin-resolved case"""
    ee = np.row_stack((np.linspace(-10.1, 100.0, 1003),
                       np.linspace(-10.2, 100.0, 1003),
                       np.linspace(-10.3, 100.0, 1003),
                       np.linspace(-10.4, 100.0, 1003))).reshape((4, 1, 1003))
    nelec = 20.0
    telec = 0.02
    nkpts = ee.shape[0]
    nspin = ee.shape[-2]
    # print(ee)
    fermi_energy = get_fermi_energy(ee, nelec, telec)
    occ = (3.0 - nspin) * fermi_dirac_occupations(telec, ee, fermi_energy)
    # print(occ)
    # print(occ.sum()/nkpts)
    # print(fermi_energy)
    self.assertAlmostEqual(occ.sum() / nkpts, 20.0)
    self.assertAlmostEqual(fermi_energy, -9.2045998319213016)
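In this test, row_stack simply assembles four 1-D energy grids into one array before it is reshaped to (nkpts, nspin, norbs); the shape manipulation alone can be reproduced without the pyscf Fermi-level routines:

import numpy as np

ee = np.row_stack((np.linspace(-10.1, 100.0, 1003),
                   np.linspace(-10.2, 100.0, 1003),
                   np.linspace(-10.3, 100.0, 1003),
                   np.linspace(-10.4, 100.0, 1003)))
print(ee.shape)                 # (4, 1003)

ee = ee.reshape((4, 1, 1003))   # (nkpts, nspin, norbs): 4 k-points, 1 spin channel
print(ee.shape)                 # (4, 1, 1003)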
Example 8: test_fermi_energy_spin_resolved_even_kpoints_spin2
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def test_fermi_energy_spin_resolved_even_kpoints_spin2(self):
    """ This is to test the determination of Fermi level in spin-resolved case"""
    ee = np.row_stack((np.linspace(-10.1, 100.0, 1003),
                       np.linspace(-10.2, 100.0, 1003),
                       np.linspace(-10.3, 100.0, 1003),
                       np.linspace(-10.4, 100.0, 1003))).reshape((2, 2, 1003))
    nelec = 20.0
    telec = 0.02
    nkpts = ee.shape[0]
    nspin = ee.shape[-2]
    # print(ee)
    fermi_energy = get_fermi_energy(ee, nelec, telec)
    occ = (3.0 - nspin) * fermi_dirac_occupations(telec, ee, fermi_energy)
    # print(occ)
    # print(occ.sum()/nkpts)
    # print(fermi_energy)
    self.assertAlmostEqual(occ.sum() / nkpts, 20.0)
    self.assertAlmostEqual(fermi_energy, -9.2045998319213016)
Example 9: _get_glyph
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def _get_glyph(gnum, height, width, shift_prob, shift_size):
    if isinstance(gnum, list):
        n = randint(*gnum)
    else:
        n = gnum

    glyph = random_points_in_circle(
        n, 0, 0, 0.5
    ) * array((width, height), 'float')

    _spatial_sort(glyph)

    if random() < shift_prob:
        shift = ((-1) ** randint(0, 2)) * shift_size * height
        glyph[:, 1] += shift
    if random() < 0.5:
        ii = randint(0, n - 1, size=(1))
        xy = glyph[ii, :]
        glyph = row_stack((glyph, xy))

    return glyph
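The final row_stack duplicates one randomly chosen glyph point; a self-contained sketch of that step with a toy glyph array and numpy.random in place of the module-level random helpers:

import numpy as np

glyph = np.random.random((8, 2))                       # toy glyph: 8 points in 2-D

ii = np.random.randint(0, len(glyph) - 1, size=(1,))   # index of the point to repeat
xy = glyph[ii, :]                                      # shape (1, 2)
glyph = np.row_stack((glyph, xy))                      # the duplicated point is appended
print(glyph.shape)                                     # (9, 2)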
Example 10: combine
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def combine(cls, constraints):
    """Create a new LinearConstraint by ANDing together several existing
    LinearConstraints.

    :arg constraints: An iterable of LinearConstraint objects. Their
      :attr:`variable_names` attributes must all match.
    :returns: A new LinearConstraint object.
    """
    if not constraints:
        raise ValueError("no constraints specified")
    variable_names = constraints[0].variable_names
    for constraint in constraints:
        if constraint.variable_names != variable_names:
            raise ValueError("variable names don't match")
    coefs = np.row_stack([c.coefs for c in constraints])
    constants = np.row_stack([c.constants for c in constraints])
    return cls(variable_names, coefs, constants)
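A sketch of the stacking step with toy coefficient and constant matrices (plain arrays stand in for the coefs and constants attributes of patsy's LinearConstraint): the rows of every constraint are concatenated vertically.

import numpy as np

# two toy constraints on three variables: coefficient rows and right-hand sides
coefs_a, constants_a = np.array([[1.0, 0.0, 0.0]]), np.array([[0.0]])
coefs_b, constants_b = np.array([[0.0, 1.0, -1.0],
                                 [0.0, 0.0, 1.0]]), np.array([[2.0],
                                                              [1.0]])

coefs = np.row_stack([coefs_a, coefs_b])               # shape (3, 3)
constants = np.row_stack([constants_a, constants_b])   # shape (3, 1)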
Example 11: DataStru
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def DataStru(self):
    self.datai['train'] = np.row_stack((np.array(self.yanzhneg_pr), np.array(self.yanzhneg_real)))  # rows are appended here
    self.datai['predict'] = np.row_stack((np.array(self.predi), np.array(self.preal)))
    # transpose the training data
    datapst = self.datai['train'].T
    # define the DataFrame column names for the training data
    mingcheng = ['第%s個模型列' % str(dd) for dd in list(range(len(self.datai['train']) - 1))] + [self.zi]
    self.datai['train'] = pd.DataFrame(datapst, columns=mingcheng)
    # transpose the prediction data
    dapst = self.datai['predict'].T
    # define the DataFrame column names for the prediction data
    mingche = ['第%s個模型列' % str(dd) for dd in list(range(len(self.datai['predict']) - 1))] + [self.zi]
    self.datai['predict'] = pd.DataFrame(dapst, columns=mingche)
    return print('二層的數據準備完畢')

# define the mean squared error function
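A hedged sketch of the row_stack-then-transpose-to-DataFrame pattern used in DataStru above, with toy arrays in place of the class attributes (the column names are illustrative):

import numpy as np
import pandas as pd

model_1 = np.array([1.0, 2.0, 3.0])    # predictions of the first model
model_2 = np.array([1.1, 2.1, 2.9])    # predictions of the second model
real = np.array([1.0, 2.0, 3.0])       # true target values

train = np.row_stack((model_1, model_2, real))   # one row per series, shape (3, 3)
columns = ['model column %d' % d for d in range(len(train) - 1)] + ['target']
df = pd.DataFrame(train.T, columns=columns)      # samples become rows, series become columns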
Example 12: DataStru
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def DataStru(self):
    self.datai['train'] = np.row_stack((np.array(self.yanzhneg_pr), np.array(self.yanzhneg_real)))  # rows are appended here
    self.datai['predict'] = np.row_stack((np.array(self.predi), np.array(self.preal)))
    # transpose the training data
    datapst = self.datai['train'].T
    # define the DataFrame column names for the training data
    mingcheng = ['第%s個模型列' % str(dd) for dd in list(range(len(self.datai['train']) - 1))] + [self.zi]
    self.datai['train'] = pd.DataFrame(datapst, columns=mingcheng)
    # transpose the prediction data
    dapst = self.datai['predict'].T
    # define the DataFrame column names for the prediction data
    mingche = ['第%s個模型列' % str(dd) for dd in list(range(len(self.datai['predict']) - 1))] + [self.zi]
    self.datai['predict'] = pd.DataFrame(dapst, columns=mingche)
    return print('二層的數據準備完畢')

# create a function that converts the predicted multi-dimensional numeric classes back to one-dimensional original name classes
Example 13: get_raw_state
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def get_raw_state(self):
    """
    Default state observation composer.

    Returns:
        and updates time-embedded environment state observation as [n, 4] numpy matrix, where:
            4 - number of signal features == state_shape[1],
            n - time-embedding length == state_shape[0] == <set by user>.

    Note:
        `self.raw_state` is used to render environment `human` mode and should not be modified.
    """
    self.raw_state = np.row_stack(
        (
            np.frombuffer(self.data.open.get(size=self.time_dim)),
            np.frombuffer(self.data.high.get(size=self.time_dim)),
            np.frombuffer(self.data.low.get(size=self.time_dim)),
            np.frombuffer(self.data.close.get(size=self.time_dim)),
        )
    ).T

    return self.raw_state
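The essential manipulation is stacking four equal-length OHLC series as rows and transposing to an [n, 4] matrix; a toy sketch without the backtrader data feeds:

import numpy as np

time_dim = 30
open_, high = np.random.random(time_dim), np.random.random(time_dim)
low, close = np.random.random(time_dim), np.random.random(time_dim)

raw_state = np.row_stack((open_, high, low, close)).T   # shape (time_dim, 4)
print(raw_state.shape)                                  # (30, 4)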
Example 14: get_raw_state
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def get_raw_state(self):
    """
    Default state observation composer.

    Returns:
        and updates time-embedded environment state observation as [n, 4] numpy matrix, where:
            4 - number of signal features == state_shape[1],
            n - time-embedding length == state_shape[0] == <set by user>.

    Note:
        `self.raw_state` is used to render environment `human` mode and should not be modified.
    """
    self.raw_state = np.row_stack(
        (
            np.frombuffer(self.data.open.get(size=self.time_dim)),
            np.frombuffer(self.data.high.get(size=self.time_dim)),
            np.frombuffer(self.data.low.get(size=self.time_dim)),
            np.frombuffer(self.data.close.get(size=self.time_dim)),
        )
    ).T

    return self.raw_state
Example 15: addingenhence_nodes
# Required import: import numpy [as alias]
# Or: from numpy import row_stack [as alias]
def addingenhence_nodes(self, data, label, step=1, batchsize='auto'):
    if batchsize == 'auto':
        batchsize = data.shape[1]

    mappingdata = self.mapping_generator.transform(data)
    inputdata = self.transform(data)

    localenhence_generator = node_generator()
    extraenhence_nodes = localenhence_generator.generator_nodes(mappingdata, step, batchsize, self._enhence_function)

    D = self.pesuedoinverse.dot(extraenhence_nodes)
    C = extraenhence_nodes - inputdata.dot(D)
    BT = self.pinv(C) if (C == 0).any() else np.mat((D.T.dot(D) + np.eye(D.shape[1]))).I.dot(D.T).dot(self.pesuedoinverse)

    self.W = np.row_stack((self.W - D.dot(BT).dot(label), BT.dot(label)))
    self.enhence_generator.update(localenhence_generator.Wlist, localenhence_generator.blist)
    self.pesuedoinverse = np.row_stack((self.pesuedoinverse - D.dot(BT), BT))
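The two row_stack calls at the end append a new block of rows to the output weights and the pseudoinverse; the incremental update itself follows the broad-learning formulas, but the appending step can be sketched with toy shapes (all matrices below are random placeholders):

import numpy as np

n_nodes, n_extra, n_out, n_samples = 20, 5, 3, 50

W = np.random.random((n_nodes, n_out))                 # existing output weights
pseudoinv = np.random.random((n_nodes, n_samples))     # existing pseudoinverse
BT = np.random.random((n_extra, n_samples))            # rows contributed by the new enhancement nodes
label = np.random.random((n_samples, n_out))
D = np.random.random((n_nodes, n_extra))

W = np.row_stack((W - D.dot(BT).dot(label), BT.dot(label)))   # shape (n_nodes + n_extra, n_out)
pseudoinv = np.row_stack((pseudoinv - D.dot(BT), BT))         # shape (n_nodes + n_extra, n_samples)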