本文整理汇总了Python中pysgpp.DataMatrix类的典型用法代码示例。如果您正苦于以下问题:Python DataMatrix类的具体用法?Python DataMatrix怎么用?Python DataMatrix使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DataMatrix类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: computeTrilinearFormByList
def computeTrilinearFormByList(self,
                               gpsk, basisk, alphak,
                               gpsi, basisi,
                               gpsj, basisj):
    """
    Compute the trilinear form for two lists of grid points: for every
    pair (gpi, gpj) the row contribution over all gpsk is computed and
    contracted with the coefficient vector alphak.

    @param gpsk: list of HashGridIndex
    @param basisk: SG++ basis for grid indices gpsk
    @param alphak: coefficients for kth grid
    @param gpsi: list of HashGridIndex
    @param basisi: SG++ basis for grid indices gpsi
    @param gpsj: list of HashGridIndex
    @param basisj: SG++ basis for grid indices gpsj
    @return: (DataMatrix of shape len(gpsi) x len(gpsj), accumulated error)
    """
    # NOTE(review): the printed eval count assumes len(gpsi) == len(gpsj);
    # the loops below actually perform len(gpsi) * len(gpsj) row evaluations
    print "# evals: %i^2 * %i = %i" % (len(gpsi), len(gpsk), len(gpsi) ** 2 * len(gpsk))
    A = DataMatrix(len(gpsi), len(gpsj))
    err = 0.
    # run over all rows
    for i, gpi in enumerate(gpsi):
        # run over all columns
        for j, gpj in enumerate(gpsj):
            # run over all gpks: b holds the per-k contributions for (gpi, gpj)
            b, erri = self.computeTrilinearFormByRow(gpsk, basisk,
                                                     gpi, basisi,
                                                     gpj, basisj)
            # get the overall contribution in the current dimension
            value = alphak.dotProduct(b)
            A.set(i, j, value)
            # error statistics accumulated over all matrix entries
            err += erri
    return A, err
示例2: merge
def merge(cls, containerList):
    """
    Concatenate the points and values of several data containers.

    @param containerList: list of DataContainer instances
    @return: a new DataContainer holding all rows, or None for empty input
    """
    if not containerList:
        return None
    # determine the total number of rows and the dimensionality
    size = 0
    dim = None
    for container in containerList:
        size += len(container.getValues())
        dim = container.getPoints().getNcols()
    # copy data row by row into the merged structures
    allPoints = DataMatrix(size, dim)
    allValues = DataVector(size)
    rowBuffer = DataVector(dim)
    rowIdx = 0
    for container in containerList:
        pts = container.getPoints()
        vals = container.getValues()
        for j in xrange(len(vals)):
            pts.getRow(j, rowBuffer)
            allPoints.setRow(rowIdx, rowBuffer)
            allValues[rowIdx] = vals[j]
            rowIdx += 1
    # wrap the merged rows in a new DataContainer
    return DataContainer(points=allPoints, values=allValues)
示例3: __getSamples
def __getSamples(self, W, T, n):
    """
    Return n samples in the unit hypercube.

    If no precomputed samples are stored, n fresh samples are drawn from
    W and transformed to the unit hypercube via T. Otherwise the stored
    samples are used (resampled with replacement if their count differs
    from n) and, when they only cover a subset of the random variables
    (self.__ixs), the missing dimensions are filled from fresh draws.

    @param W: joint distribution offering rvs() and getDim()
    @param T: transformation offering probabilisticToUnit()
    @param n: number of samples requested
    @return: DataMatrix of shape n x dim with unit-hypercube samples
    """
    if self.samples is None:
        # fix: the drawn samples were previously overwritten by the
        # result matrix ("ans = W.rvs(n)" followed by "ans = DataMatrix(...)"),
        # so the loop transformed the empty matrix instead of the draws
        samples = W.rvs(n)
        # transform the draws to the unit hypercube
        ans = DataMatrix(n, W.getDim())
        for i, sample in enumerate(samples):
            p = T.probabilisticToUnit(sample)
            ans.setRow(i, DataVector(p))
        return ans
    else:
        if self.samples.shape[0] == n:
            dataSamples = self.samples
        else:
            # resample with replacement to get exactly n rows
            ixs = np.random.randint(0, len(self.samples), n)
            dataSamples = self.samples[ixs, :]
        # check if there are just samples for a subset of the random
        # variables. If so, add the missing ones from fresh draws
        if self.__ixs is not None:
            ans = W.rvs(n)
            # transform them to the unit hypercube, then overwrite the
            # covered dimensions with the stored samples
            for i, sample in enumerate(dataSamples):
                ans[i, :] = T.probabilisticToUnit(ans[i, :])
                ans[i, self.__ixs] = sample
        else:
            ans = dataSamples
        return DataMatrix(ans)
示例4: checkPositivity
def checkPositivity(grid, alpha):
    """
    Check whether the sparse grid function defined by (grid, alpha) is
    non-negative at every point of the corresponding full grid of the
    same maximum level. Negative values (beyond a 1e-13 tolerance) are
    printed together with their coordinates.

    @param grid: SG++ grid
    @param alpha: surplus vector of the sparse grid function
    @return: True iff no (significantly) negative value was found
    """
    # define a full grid of maxlevel of the grid
    gs = grid.getStorage()
    fullGrid = Grid.createLinearGrid(gs.dim())
    fullGrid.createGridGenerator().full(gs.getMaxLevel())
    fullHashGridStorage = fullGrid.getStorage()
    # collect the coordinates of all full-grid points as evaluation points
    A = DataMatrix(fullHashGridStorage.size(), fullHashGridStorage.dim())
    p = DataVector(gs.dim())
    for i in xrange(fullHashGridStorage.size()):
        fullHashGridStorage.get(i).getCoords(p)
        A.setRow(i, p)
    res = evalSGFunctionMulti(grid, alpha, A)
    # ymin starts at 0 so the reported interval is only widened by
    # negative values; ymax tracks the largest offending value
    ymin, ymax, cnt = 0, -1e10, 0
    for i, yi in enumerate(res.array()):
        # ignore tiny negative values that are numerical noise
        if yi < 0. and abs(yi) > 1e-13:
            cnt += 1
            ymin = min(ymin, yi)
            ymax = max(ymax, yi)
            A.getRow(i, p)
            print "  %s = %g" % (p, yi)
    if cnt > 0:
        print "warning: function is not positive"
        print "%i/%i: [%g, %g]" % (cnt, fullHashGridStorage.size(), ymin, ymax)
    return cnt == 0
示例5: computePiecewiseConstantBF
def computePiecewiseConstantBF(grid, U, admissibleSet):
    """
    Build a bilinear form weighted by a piecewise constant approximation
    of the probability density U: each L2 dot-product entry is multiplied
    by the pdf evaluated at the coordinates of the row's grid point.

    @param grid: SG++ grid
    @param U: joint distribution offering pdf()
    @param admissibleSet: set of candidate grid points (offers getSize()
                          and values())
    @return: (B, b) where B is a DataMatrix of shape
             admissibleSet.getSize() x gs.size() and b holds the diagonal
             entries B[k, seq(gpi_k)]
    """
    # create bilinear form of the grid
    gs = grid.getStorage()
    A = DataMatrix(gs.size(), gs.size())
    createOperationLTwoDotExplicit(A, grid)
    # multiply the entries with the pdf at the center of the support
    p = DataVector(gs.dim())
    q = DataVector(gs.dim())
    B = DataMatrix(admissibleSet.getSize(), gs.size())
    b = DataVector(admissibleSet.getSize())
    # NOTE(review): the commented block below sketched an alternative
    # per-dimension cdf-difference weighting; kept for reference
    # s = np.ndarray(gs.dim(), dtype='float')
    for k, gpi in enumerate(admissibleSet.values()):
        i = gs.seq(gpi)
        gpi.getCoords(p)
        for j in xrange(gs.size()):
            gs.get(j).getCoords(q)
            # for d in xrange(gs.dim()):
            #     # get level index
            #     xlow = max(p[0], q[0])
            #     xhigh = min(p[1], q[1])
            #     s[d] = U[d].cdf(xhigh) - U[d].cdf(xlow)
            # weight the L2 entry with the pdf at the row point's coords
            y = float(A.get(i, j) * U.pdf(p))
            B.set(k, j, y)
            if i == j:
                b[k] = y
    return B, b
示例6: testOperationB
def testOperationB(self):
    """
    Check OperationB mult/multTranspose on a level-2 linear boundary
    grid in 1d: a unit weight at x=0.25 must distribute onto the basis
    functions with the expected values, and the transpose must evaluate
    a single hat function at x=0.25.
    """
    from pysgpp import Grid, DataVector, DataMatrix
    factory = Grid.createLinearBoundaryGrid(1)
    gen = factory.createGridGenerator()
    gen.regular(2)
    alpha = DataVector(factory.getStorage().size())
    p = DataMatrix(1, 1)
    beta = DataVector(1)
    # mult: distribute beta at x=0.25 onto the basis functions
    alpha.setAll(0.0)
    p.set(0, 0, 0.25)
    beta[0] = 1.0
    opb = factory.createOperationB()
    opb.mult(beta, p, alpha)
    # fix: failUnlessAlmostEqual is a deprecated alias (removed in
    # Python 3.12); use assertAlmostEqual
    self.assertAlmostEqual(alpha[0], 0.75)
    self.assertAlmostEqual(alpha[1], 0.25)
    self.assertAlmostEqual(alpha[2], 0.5)
    self.assertAlmostEqual(alpha[3], 1.0)
    self.assertAlmostEqual(alpha[4], 0.0)
    # multTranspose: evaluate a single basis function at x=0.25
    alpha.setAll(0.0)
    alpha[2] = 1.0
    p.set(0, 0, 0.25)
    beta[0] = 0.0
    opb.multTranspose(alpha, p, beta)
    self.assertAlmostEqual(beta[0], 0.5)
示例7: writeSurplusesLevelWise
def writeSurplusesLevelWise(self, filename):
    """
    Write the surpluses of all available knowledge types, classified by
    level sum, to one ARFF file per time step.

    @param filename: prefix for the output files; each time step t is
                     written to "<filename>.t<t>.surpluses.arff"
    """
    # locate all knowledge types available
    dtypes = self.__learner.getKnowledgeTypes()
    names = ['level']
    for dtype in dtypes:
        names.append("surplus_%s" % KnowledgeTypes.toString(dtype))
    ts = self.__knowledge.getAvailableTimeSteps()
    for t in ts:
        # collect all the surpluses classifying them by level sum
        data = {}
        n = 0
        for dtype in dtypes:
            data[dtype] = self.computeSurplusesLevelWise(t, dtype)
            # NOTE(review): n keeps only the last dtype's total; this
            # assumes every dtype has the same number of surpluses —
            # TODO confirm
            n = sum([len(values) for values in data[dtype].values()])
        A = DataMatrix(n, len(names))
        # add them to a matrix structure: column 0 is the level sum,
        # column i+1 holds the surpluses of dtypes[i]
        for i, dtype in enumerate(dtypes):
            k = 0
            for level, surpluses in data[dtype].items():
                for j, surplus in enumerate(surpluses):
                    A.set(k + j, i + 1, surplus)
                    A.set(k + j, 0, level)
                k += len(surpluses)
        writeDataARFF({'filename': "%s.t%s.surpluses.arff" % (filename, t),
                       'data': A,
                       'names': names})
示例8: __prepareDataContainer
def __prepareDataContainer(self, data, name):
    """
    Prepare data for learning
    @param data: dictionary loaded from UQSetting
    @param name: label attached to each resulting DataContainer
    @return dictionary {dtype: {t: <DataContainer>}}
    """
    ans = {}
    # joint distribution of the active parameters
    dist = self.getParameters() \
               .activeParams() \
               .getIndependentJointDistribution()
    for dtype in self.getKnowledgeTypes():
        ans[dtype] = {}
        numDims = self.grid.getStorage().dim()
        # prepare data container depending on the given knowledge type
        transformed = KnowledgeTypes.transformData(data, dist, dtype)
        # load data for all time steps
        for t, values in transformed.items():
            numRows = len(values)
            points = DataMatrix(numRows, numDims)
            results = DataVector(numRows)
            for i, (sample, res) in enumerate(values.items()):
                points.setRow(i, DataVector(sample.getActiveUnit()))
                results[i] = float(res)
            ans[dtype][t] = DataContainer(points=points,
                                          values=results,
                                          name=name)
    return ans
示例9: computeMoments
def computeMoments(self, ts=None):
    """
    Compute mean and variance (with their discretization error
    estimates) for the requested time steps.

    @param ts: list of time steps to evaluate; defaults to all time
               steps for which samples are available
    @return: dict with 'data' (DataMatrix, one row per time step) and
             'names' (column labels)
    """
    names = ['time',
             'iteration',
             'grid_size',
             'mean',
             'meanDiscretizationError',
             'var',
             'varDiscretizationError']
    # fix: honor the ts argument instead of silently overwriting it
    if ts is None:
        ts = self.__samples.keys()
    nrows = len(ts)
    ncols = len(names)
    data = DataMatrix(nrows, ncols)
    v = DataVector(ncols)
    row = 0
    for t in ts:
        v.setAll(0.0)
        v[0] = t
        v[1] = 0
        v[2] = len(self.__samples[t].values())
        v[3], v[4] = self.mean(ts=[t])
        v[5], v[6] = self.var(ts=[t])
        # write results to matrix
        data.setRow(row, v)
        row += 1
    return {'data': data,
            'names': names}
示例10: testOperationTest_test
def testOperationTest_test(self):
    """
    Check OperationTest on a level-1 linear boundary grid in 1d: a
    positive surplus at the inner point must classify the data point
    x=0.25 (class +1) correctly; a negative surplus must not.
    """
    from pysgpp import Grid, DataVector, DataMatrix
    factory = Grid.createLinearBoundaryGrid(1)
    gen = factory.createGridGenerator()
    gen.regular(1)
    alpha = DataVector(factory.getStorage().size())
    data = DataMatrix(1, 1)
    data.setAll(0.25)
    classes = DataVector(1)
    classes.setAll(1.0)
    testOP = factory.createOperationTest()
    # positive surplus -> point classified correctly
    alpha[0] = 0.0
    alpha[1] = 0.0
    alpha[2] = 1.0
    c = testOP.test(alpha, data, classes)
    # fix: failUnless is a deprecated alias (removed in Python 3.12);
    # use the modern assert* methods
    self.assertTrue(c > 0.0)
    # negative surplus -> point misclassified
    alpha[0] = 0.0
    alpha[1] = 0.0
    alpha[2] = -1.0
    c = testOP.test(alpha, data, classes)
    self.assertEqual(c, 0.0)
示例11: plotGrid
def plotGrid(self, learner, suffix):
    """
    Evaluate the learner's model on a 30x30 grid over [0, 1]^2 and save
    the result as a 3D wireframe plot.

    @param learner: learner offering applyData() and an iteration counter
    @param suffix: string used in the output file name
    """
    from mpl_toolkits.mplot3d.axes3d import Axes3D
    import matplotlib.pyplot as plt
    xs = np.linspace(0, 1, 30)
    ys = np.linspace(0, 1, 30)
    X, Y = np.meshgrid(xs, ys)
    # fix: use np.zeros explicitly instead of a star-imported `zeros`
    Z = np.zeros(np.shape(X))
    # fix: renamed `input` -> `evalPoints` (shadowed the builtin)
    evalPoints = DataMatrix(np.shape(Z)[0] * np.shape(Z)[1], 2)
    # flatten the mesh row by row into the evaluation matrix
    r = 0
    for i in xrange(np.shape(Z)[0]):
        for j in xrange(np.shape(Z)[1]):
            evalPoints.set(r, 0, X[i, j])
            evalPoints.set(r, 1, Y[i, j])
            r += 1
    result = learner.applyData(evalPoints)
    # copy the flat result vector back into the mesh shape
    r = 0
    for i in xrange(np.shape(Z)[0]):
        for j in xrange(np.shape(Z)[1]):
            Z[i, j] = result[r]
            r += 1
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.plot_wireframe(X, Y, Z)
    plt.savefig("grid3d_%s_%i.png" % (suffix, learner.iteration))
    fig.clf()
    plt.close(plt.gcf())
示例12: setUp
def setUp(self):
    """Build a 9-point 1d data set: first half labeled -1, rest +1."""
    self.size = 9
    self.level = 4
    points = DataMatrix(self.size, 1)
    values = DataVector(self.size)
    half = self.size / 2
    for idx in xrange(self.size):
        points.set(idx, 0, idx)
        if idx < half:
            values[idx] = -1
        else:
            values[idx] = 1
    self.dataContainer = DataContainer(points=points, values=values)
    self.policy = StratifiedFoldingPolicy(self.dataContainer, self.level)
示例13: dehierarchizeOnNewGrid
def dehierarchizeOnNewGrid(gridResult, grid, alpha):
    """
    Evaluate the sparse grid function (grid, alpha) at every point of
    gridResult and return the nodal values.

    @param gridResult: grid whose points define the evaluation locations
    @param grid: SG++ grid of the function to evaluate
    @param alpha: surplus vector of the function
    @return: nodal values at the points of gridResult
    """
    storage = gridResult.getStorage()
    numPoints = storage.size()
    # gather the coordinates of all result-grid points
    coords = DataVector(storage.dim())
    evalPoints = DataMatrix(numPoints, storage.dim())
    for k in xrange(numPoints):
        storage.get(k).getCoords(coords)
        evalPoints.setRow(k, coords)
    return evalSGFunctionMulti(grid, alpha, evalPoints)
示例14: setUp
def setUp(self):
    """Build an 11-point 1d data set where each value equals its index."""
    self.size = 11
    self.level = 10
    points = DataMatrix(self.size, 1)
    values = DataVector(self.size)
    for idx in xrange(self.size):
        points.set(idx, 0, idx)
        values[idx] = idx
    self.dataContainer = DataContainer(points=points, values=values)
    self.policy = SequentialFoldingPolicy(self.dataContainer, self.level)
示例15: buildTrainingVector
def buildTrainingVector(data):
    """
    Convert a column-major dict entry data["data"] (one list per
    dimension) into a row-major DataMatrix of training points.

    @param data: dict whose "data" entry is a list of per-dimension columns
    @return: DataMatrix with one row per data point
    """
    from pysgpp import DataMatrix
    columns = data["data"]
    dim = len(columns)
    numPoints = len(columns[0])
    training = DataMatrix(numPoints, dim)
    # d iterates over the dimensions, i over the data points
    for d in xrange(dim):
        column = columns[d]
        for i in xrange(numPoints):
            training.set(i, d, column[i])
    return training