This page collects typical code examples of the Python method pysgpp.DataVector.array. If you are wondering what DataVector.array does, how to call it, or what real usage looks like, the curated examples below may help. You can also explore further usage examples of the containing class, pysgpp.DataVector.
The 15 code examples of DataVector.array shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
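Before the individual examples, here is a minimal sketch (assuming pysgpp and numpy are installed) of the recurring idiom they all share: DataVector.array() hands the vector's values over to numpy.
import numpy as np
from pysgpp import DataVector

v = DataVector([0.25, 0.5, 0.75])   # construct a DataVector from a Python list
v.mult(2.0)                          # in-place scaling on the pysgpp side
a = np.array(v.array())              # take an explicit numpy copy of the values
print(a)                             # -> values 0.5, 1.0, 1.5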
Example 1: __computeRanking
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def __computeRanking(self, v, A, b):
"""
Compute ranking for variance estimation
\argmax_{i \in \mathcal{A}} | v_i (2 (Av)_i - v_i b_i) |
@param v: DataVector, coefficients of known grid points
@param A: DataMatrix, stiffness matrix
@param b: DataVector, squared expectation value contribution
@return: numpy array, contains the ranking for the given samples
"""
# update the ranking
av = DataVector(A.getNrows())
av.setAll(0.0)
# = Av
for i in xrange(A.getNrows()):
for j in xrange(A.getNcols()):
av[i] += A.get(i, j) * v[j]
av.mult(2.) # = 2 * Av
b.componentwise_mult(v) # = v * b
av.sub(b) # = 2 * Av - v * b
w = DataVector(v)
w.componentwise_mult(av) # = v * (2 * Av - v * b)
w.abs() # = | v * (2 * Av - v * b) |
return w.array()
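For reference, the double loop above is just a dense matrix-vector product; a plain-numpy sketch of the same ranking (assuming v, A, and b are already numpy arrays of matching shape) could look like this:
import numpy as np

def compute_ranking_np(v, A, b):
    # elementwise: | v_i * (2 * (A v)_i - v_i * b_i) |
    return np.abs(v * (2.0 * A.dot(v) - v * b))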
Example 2: plotSG3d
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def plotSG3d(grid, alpha, n=50, f=lambda x: x):
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.linspace(0, 1, n)
Y = np.linspace(0, 1, n)
X, Y = np.meshgrid(X, Y)
Z = np.zeros(n * n).reshape(n, n)
for i in xrange(len(X)):
for j, (x, y) in enumerate(zip(X[i], Y[i])):
Z[i, j] = f(evalSGFunction(grid, alpha, DataVector([x, y])))
# get grid points
gs = grid.getStorage()
gps = np.zeros([gs.size(), 2])
p = DataVector(2)
for i in xrange(gs.size()):
gs.get(i).getCoords(p)
gps[i, :] = p.array()
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
linewidth=0, antialiased=False)
ax.scatter(gps[:, 0], gps[:, 1], np.zeros(gs.size()))
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
# ax.set_zlim(0, 2)
fig.colorbar(surf, shrink=0.5, aspect=5)
return fig, ax, Z
Example 3: serializeToFile
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def serializeToFile(self, memento, filename):
fstream = self.gzOpen(filename, "w")
try:
figure = plt.figure()
grid = memento
storage = grid.getStorage()
coord_vector = DataVector(storage.dim())
points = zeros([storage.size(), storage.dim()])
for i in xrange(storage.size()):
point = storage.get(i)
point.getCoords(coord_vector)
points[i] = [j for j in coord_vector.array()]
num_of_subplots = storage.dim()*(storage.dim()-1)/2
rows = int(ceil(sqrt(num_of_subplots)))
cols = int(floor(sqrt(num_of_subplots)))
i = 1
for x1 in xrange(1,storage.dim()):
for x2 in xrange(2,storage.dim()+1):
figure.add_subplot(rows, cols, i)
plt.xlabel('x%d'%x1, figure=figure)
plt.ylabel('x%d'%x2, figure=figure)
plt.scatter(points[:,x1-1], points[:,x2-1], figure=figure)
i +=1
plt.savefig(fstream, figure=figure)
plt.close(figure)
finally:
fstream.close()
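Note that the nested loop above visits x1 in 1..dim-1 and x2 in 2..dim, which is more index pairs than the dim*(dim-1)/2 subplots it allocates. A sketch of the same pairwise scatter plots that draws each unordered dimension pair exactly once (assuming `points` is the (size x dim) coordinate array built above, with dim >= 2) might look like this:
from itertools import combinations
from math import ceil, sqrt
import matplotlib.pyplot as plt

def plot_dimension_pairs(points):
    # one scatter plot per unordered pair of dimensions (x1 < x2)
    dim = points.shape[1]
    n_plots = dim * (dim - 1) // 2
    rows = int(ceil(sqrt(n_plots)))
    cols = int(ceil(float(n_plots) / rows))
    fig = plt.figure()
    for i, (x1, x2) in enumerate(combinations(range(dim), 2), start=1):
        ax = fig.add_subplot(rows, cols, i)
        ax.scatter(points[:, x1], points[:, x2])
        ax.set_xlabel('x%d' % (x1 + 1))
        ax.set_ylabel('x%d' % (x2 + 1))
    return fig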
Example 4: gradient_fun
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def gradient_fun(self, params):
'''
Compute the gradient vector in the current state
'''
#import ipdb; ipdb.set_trace() #
gradient_array = np.empty((self.batch_size, self.grid.getSize()))
for sample_idx in xrange(self.batch_size):
x = self._lastseen[sample_idx, :self.dim]
y = self._lastseen[sample_idx, self.dim]
params_DV = DataVector(params)
gradient = DataVector(len(params_DV))
single_alpha = DataVector(1)
single_alpha[0] = 1
data_matrix = DataMatrix(x.reshape(1,-1))
mult_eval = createOperationMultipleEval(self.grid, data_matrix);
mult_eval.multTranspose(single_alpha, gradient);
residual = gradient.dotProduct(params_DV) - y;
gradient.mult(residual);
#import ipdb; ipdb.set_trace() #
gradient_array[sample_idx, :] = gradient.array()
return gradient_array
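Stripped of the pysgpp types, the per-sample update above is the standard least-squares gradient phi(x) * (phi(x)^T w - y). A plain-numpy sketch (assuming `phi_x` holds the basis-function values that multTranspose produces for one sample x):
import numpy as np

def single_sample_gradient(phi_x, params, y):
    # least-squares gradient for one sample: phi(x) * (phi(x)^T params - y)
    residual = np.dot(phi_x, params) - y
    return residual * phi_x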
Example 5: plotGrid2d
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def plotGrid2d(grid, alpha=None):
gs = grid.getStorage()
gps = {'p': np.zeros([0, 2]),
'n': np.zeros([0, 2])}
p = DataVector(2)
for i in xrange(gs.size()):
gs.get(i).getCoords(p)
if alpha is None or alpha[i] >= 0:
gps['p'] = np.vstack((gps['p'], p.array()))
else:
gps['n'] = np.vstack((gps['n'], p.array()))
# plot the grid points
plt.plot(gps['p'][:, 0], gps['p'][:, 1], "^ ", color='red')
plt.plot(gps['n'][:, 0], gps['n'][:, 1], "v ", color='red')
plt.xlim(0, 1)
plt.ylim(0, 1)
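A hypothetical usage sketch, built only from calls that appear elsewhere in these examples: plot a regular 2D grid of level 3. With alpha=None every grid point is drawn with the '^' marker.
from pysgpp import Grid
import matplotlib.pyplot as plt

grid = Grid.createLinearGrid(2)
grid.createGridGenerator().regular(3)
plotGrid2d(grid)
plt.show()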
Example 6: getCollocationNodes
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def getCollocationNodes(self):
"""
Create a set of all collocation nodes
"""
gs = self.grid.getStorage()
ps = np.ndarray([gs.size(), gs.dim()], dtype='float32')
p = DataVector(gs.dim())
for i in xrange(gs.size()):
gs.get(i).getCoords(p)
ps[i, :] = p.array()
return ps
Example 7: nextSamples
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def nextSamples(self, n=1):
p = DataVector(self._dim)
ans = Samples(self._params, dtype=DistributionType.UNITUNIFORM)
U = self._params.activeParams().getIndependentJointDistribution()
for _ in xrange(n):
self.__genObj.getSample(p)
# transform it to the probabilistic space
q = U.ppf(p.array())
# add it to the output
ans.add(q, dtype=SampleType.ACTIVEPROBABILISTIC)
return ans
Example 8: refineGrid
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def refineGrid(self):
# load the time steps we use for refinement
# refinets = self.getRefinement().getAdaptTimeWindow()
refinets = self.getTimeStepsOfInterest()
oldGridSize = self.getGrid().getSize()
oldAdmissibleSetSize = self.getRefinement().getAdmissibleSet().getSize()
# refine
newCollocationNodes = self.getRefinement().refineGrid(self, refinets)
# increase counter
self.iteration += 1
# print some information
if self._verbose:
print "iteration: %i" % self.iteration
print "old grid size: %i" % oldGridSize
print "old AS size: %i" % oldAdmissibleSetSize
print "new collocation nodes: %i" % len(newCollocationNodes)
print "new grid size:", self.getGrid().getSize()
print "new AS size: %i" % self.getRefinement()\
.getAdmissibleSet()\
.getSize()
# fig = plotGrid(self.__grid, self.__knowledge.getAlpha(self.getQoI()),
# self.getRefinement().getAdmissibleSetCreator()
# .getAdmissibleSet(),
# self.getParameters(), newCollocationNodes)
# fig.savefig('%i.png' % self._learner.iteration)
# parse them to a numpy array
gs = self.grid.getStorage()
p = DataVector(gs.dim())
ans = np.ndarray([len(newCollocationNodes), gs.dim()], dtype='float32')
for i, gp in enumerate(newCollocationNodes):
gp.getCoords(p)
ans[i, :] = p.array()
return ans
Example 9: pdf
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def pdf(self, x):
# convert the parameter to the right format
if isList(x):
x = DataVector(x)
elif isNumerical(x):
x = DataVector([x])
if isinstance(x, DataMatrix):
A = x
res = DataVector(A.getNrows())
res.setAll(0.0)
elif isinstance(x, DataVector):
A = DataMatrix(1, len(x))
A.setRow(0, x)
res = DataVector(1)
res.setAll(0)
self.dist.pdf(A, res)
if len(res) == 1:
return res[0]
else:
return res.array()
Example 10: computeErrors
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def computeErrors(jgrid, jalpha, grid, alpha, f, n=200):
"""
Compute some errors to estimate the quality of the
interpolation.
@param jgrid: Grid, new discretization
@param jalpha: DataVector, new surpluses
@param grid: Grid, old discretization
@param alpha: DataVector, old surpluses
@param f: function, to be interpolated
@param n: int, number of Monte Carlo estimates for error estimation
@return: tuple(<float>, <float>), maxdrift, l2norm
"""
jgs = jgrid.getStorage()
# create control samples
samples = DataMatrix(np.random.rand(n, jgs.dim()))
# evaluate the sparse grid functions
jnodalValues = evalSGFunctionMulti(jgrid, jalpha, samples)
nodalValues = evalSGFunctionMulti(grid, alpha, samples)
# compute errors
p = DataVector(jgs.dim())
err = DataVector(n)
for i in xrange(n):
samples.getRow(i, p)
y = f(p.array(), nodalValues[i])
err[i] = abs(y - jnodalValues[i])
# get error statistics
# l2
l2norm = err.l2Norm()
# maxdrift
err.abs()
maxdrift = err.max()
return maxdrift, l2norm
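The same error statistics written for plain numpy arrays (a sketch, assuming `y_true` and `y_approx` hold the reference and interpolated values at the control samples):
import numpy as np

def error_statistics(y_true, y_approx):
    err = np.abs(np.asarray(y_true) - np.asarray(y_approx))
    maxdrift = err.max()                  # maximum pointwise deviation
    l2norm = np.sqrt(np.sum(err ** 2))    # same quantity as DataVector.l2Norm()
    return maxdrift, l2norm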
Example 11: ppf
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def ppf(self, x):
# convert the parameter to the right format
if isList(x):
x = DataVector(x)
elif isNumerical(x):
x = DataVector([x])
# do the transformation
if self.grid.getStorage().dim() == 1:
op = createOperationInverseRosenblattTransformation1D(self.grid)
ans = np.ndarray(len(x))
for i, xi in enumerate(x.array()):
ans[i] = op.doTransformation1D(self.alpha, xi)
if len(ans) == 1:
return ans[0]
else:
return ans
else:
if isinstance(x, DataMatrix):
A = x
B = DataMatrix(A.getNrows(), A.getNcols())
B.setAll(0.0)
elif isinstance(x, DataVector):
A = DataMatrix(1, len(x))
A.setRow(0, x)
B = DataMatrix(1, len(x))
B.setAll(0)
# do the transformation
op = createOperationInverseRosenblattTransformation(self.grid)
op.doTransformation(self.alpha, A, B)
# extract the outcome
if isNumerical(x) or isinstance(x, DataVector):
return B.get(0, 0)
elif isinstance(x, DataMatrix):
return B.array()
Example 12: discretizeFunction
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def discretizeFunction(f, bounds, level=2, hasBorder=False, *args, **kws):
# define linear transformation to the unit hyper cube
T = JointTransformation()
for xlim in bounds:
T.add(LinearTransformation(xlim[0], xlim[1]))
# create grid
dim = len(bounds)
# create adequate grid
if hasBorder:
grid = Grid.createLinearBoundaryGrid(dim)
else:
grid = Grid.createLinearGrid(dim)
# init storage
grid.createGridGenerator().regular(level)
gs = grid.getStorage()
# discretize on given level
p = DataVector(dim)
nodalValues = DataVector(gs.size())
for i in xrange(gs.size()):
gs.get(i).getCoords(p)
# transform to the right space
q = T.unitToProbabilistic(p.array())
# apply the given function
nodalValues[i] = float(f(q))
# hierarchize
alpha = hierarchize(grid, nodalValues)
# estimate the l2 error
err = estimateDiscreteL2Error(grid, alpha, f)
# TODO: adaptive refinement
return grid, alpha, err
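A hypothetical usage sketch for discretizeFunction; the function and bounds below are chosen purely for illustration:
f = lambda x: x[0] * x[1]
bounds = [[0.0, 1.0], [0.0, 1.0]]
grid, alpha, err = discretizeFunction(f, bounds, level=3)
print(grid.getSize())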
Example 13: computeCoefficients
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def computeCoefficients(jgrid, grid, alpha, f):
"""
Interpolate function f, which depends on some sparse grid function
(grid, alpha) on jgrid
@param jgrid: Grid, new discretization
@param grid: Grid, old discretization
@param alpha: DataVector, surpluses for grid
@param f: function, to be interpolated
@return: DataVector, surpluses for jgrid
"""
jgs = jgrid.getStorage()
# dehierarchization
p = DataVector(jgs.dim())
A = DataMatrix(jgs.size(), jgs.dim())
for i in xrange(jgs.size()):
jgs.get(i).getCoords(p)
A.setRow(i, p)
nodalValues = evalSGFunctionMulti(grid, alpha, A)
# apply f to all grid points
jnodalValues = DataVector(jgs.size())
for i in xrange(len(nodalValues)):
A.getRow(i, p)
# print i, p.array(), nodalValues[i], alpha.min(), alpha.max()
# if nodalValues[i] < -1e20 or nodalValues[i] > 1e20:
# from pysgpp.extensions.datadriven.uq.operations import evalSGFunction, evalSGFunctionMultiVectorized
# print alpha.min(), alpha.max()
# print evalSGFunction(grid, alpha, p)
# print evalSGFunctionMulti(grid, alpha, DataMatrix([p.array()]))
# print evalSGFunctionMultiVectorized(grid, alpha, DataMatrix([p.array()]))
# import ipdb; ipdb.set_trace()
jnodalValues[i] = f(p.array(), nodalValues[i])
jalpha = hierarchize(jgrid, jnodalValues)
return jalpha
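A hypothetical sketch composing Examples 10 and 13: re-interpolate the square of an existing sparse grid function (grid, alpha) on a second grid `jgrid`, then check the interpolation quality. All grid objects are assumed to exist as in the examples above.
square = lambda p, val: val ** 2          # f receives (point, nodal value)
jalpha = computeCoefficients(jgrid, grid, alpha, square)
maxdrift, l2norm = computeErrors(jgrid, jalpha, grid, alpha, square, n=500)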
Example 14: __init__
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
def __init__(self, **kwargs):
self.points = {}
self.values = {}
self.dataDict = {}
self.specifications = {}
if kwargs is None:
raise Exception("Argument list is empty")
try:
if kwargs.has_key('adapter'): #takes (adapter: DataAdapter)
adapter = kwargs['adapter']
container = adapter.loadData()
self.points = container.points
self.values = container.values
self.dim = container.dim
self.size = container.size
self.specifications = container.specifications
self.name = container.name
else:
if kwargs.has_key('size') and kwargs.has_key('dim'): #takes (size: int, dim: int, name="train")
self.name = kwargs.get('name', self.TRAIN_CATEGORY)
self.size = kwargs['size']
self.dim = kwargs['dim']
self.points[self.name] = DataMatrix(self.size, self.dim)
self.values[self.name] = DataVector(self.size)
specification = DataSpecification()
specification.createNumericAttributes(self.dim)
self.specifications[self.name] = specification
elif kwargs.has_key('points') and kwargs.has_key('values'): #takes (points: DataVector, values: DataVector, name="train", filename=None)
self.name = kwargs.get('name', self.TRAIN_CATEGORY)
if isinstance(kwargs['points'], DataMatrix):
self.points[self.name] = kwargs['points']
else:
self.points[self.name] = DataMatrix(kwargs['points'])
if isinstance(kwargs['values'], DataVector):
self.values[self.name] = kwargs['values']
else:
self.values[self.name] = DataVector(kwargs['values'])
# creating dictionary for fast search point -> value
self.dataDict[self.name] = {}
p = DataVector(self.points[self.name].getNcols())
for i in xrange(self.points[self.name].getNrows()):
self.points[self.name].getRow(i, p)
key = tuple(p.array())
self.dataDict[self.name][key] = self.values[self.name][i]
self.size = self.points[self.name].getNrows()
self.dim = self.points[self.name].getNcols()
specification = DataSpecification()
specification.createNumericAttributes(self.dim)
# if data comes from a file, note it in the specification
filename = kwargs.get('filename', None)
if not filename is None:
specification.setFilename(filename)
specification.setSaved()
self.specifications[self.name] = specification
self.tempPoint = DataVector(self.dim)
self.tempValue = DataVector(1)
except IndexError:
raise Exception('Wrong or no attributes in constructor')
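A minimal, self-contained sketch of the point-to-value lookup idiom used above: each DataMatrix row is converted to a tuple via DataVector.array() so that it can serve as a dictionary key (the data here are random and purely illustrative):
import numpy as np
from pysgpp import DataMatrix, DataVector

points = DataMatrix(np.random.rand(5, 2))
values = DataVector([1.0, 2.0, 3.0, 4.0, 5.0])
lookup = {}
row = DataVector(points.getNcols())
for i in range(points.getNrows()):
    points.getRow(i, row)
    lookup[tuple(row.array())] = values[i]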
Example 15: LibAGFDist
# Required import: from pysgpp import DataVector [as alias]
# Or: from pysgpp.DataVector import array [as alias]
class LibAGFDist(Dist):
"""
The Sparse Grid Density Estimation (SGDE) distribution
"""
def __init__(self,
trainData,
samples=None,
testData=None,
bandwidths=None,
transformation=None,
surfaceFile=None):
super(LibAGFDist, self).__init__()
self.trainData = DataMatrix(trainData)
self.testData = testData
self.bounds = [[0, 1] for _ in xrange(trainData.shape[1])]
if len(self.bounds) == 1:
self.bounds = self.bounds[0]
if transformation is not None:
self.bounds = [trans.getBounds()
for trans in transformation.getTransformations()]
self.dim = trainData.shape[1]
self.samples = samples
self.transformation = transformation
self.bandwidths = None
if bandwidths is not None:
self.bandwidths = bandwidths
else:
op = createOperationInverseRosenblattTransformationKDE(self.trainData)
self.bandwidths = DataVector(self.dim)
op.getOptKDEbdwth(self.bandwidths)
self.surfaceFile = surfaceFile
@classmethod
def byConfig(cls, config):
if config is not None and os.path.exists(config):
# init density function
traindatafile, samplefile, testFile, testOutFile, bandwidthFile, surfaceFile = \
cls.computeDensity(config)
return cls.byFiles(traindatafile, samplefile,
testFile, testOutFile,
bandwidthFile, surfaceFile)
@classmethod
def byFiles(cls, trainDataFile,
samplesFile=None,
testFile=None,
testOutFile=None,
bandwidthFile=None,
surfaceFile=None):
# load training file
if os.path.exists(trainDataFile):
trainData = np.loadtxt(trainDataFile)
if len(trainData.shape) == 1:
trainData = np.array([trainData]).transpose()
else:
raise Exception('The training data file "%s" does not exist' % trainDataFile)
# load samples for quadrature
samples = None
if samplesFile is not None:
if os.path.exists(samplesFile):
samples = np.loadtxt(samplesFile)
# if the data is just one dimensional -> transform to
# matrix with one column
if len(samples.shape) == 1:
samples = np.array([samples]).transpose()
# load test file for evaluating pdf values
testData = None
if testFile is not None:
if os.path.exists(testFile):
testData = np.loadtxt(testFile)
# if the data is just one dimensional -> transform to
# matrix with one column
if len(testData.shape) == 1:
testData = np.array([testData]).transpose()
# load bandwidths file for evaluating pdf values
bandwidths = None
if bandwidthFile is not None:
if os.path.exists(bandwidthFile):
bandwidths = np.loadtxt(bandwidthFile)
# load pdf values for testSamples if available
if testOutFile is not None:
if os.path.exists(testOutFile):
testLikelihood = np.loadtxt(testOutFile)
# store the results in a hash map
if testData is not None:
testDataEval = {}
for i, sample in enumerate(testData):
testDataEval[tuple(sample)] = testLikelihood[i]
if surfaceFile is not None and not os.path.exists(surfaceFile):
surfaceFile = None
return cls(trainData,
#.........the rest of the code is omitted here.........