本文整理汇总了Python中pysgpp.DataMatrix.setRow方法的典型用法代码示例。如果您正苦于以下问题:Python DataMatrix.setRow方法的具体用法?Python DataMatrix.setRow怎么用?Python DataMatrix.setRow使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pysgpp.DataMatrix
的用法示例。
在下文中一共展示了DataMatrix.setRow方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: cdf
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def cdf(self, x):
    """Evaluate the cumulative distribution function at x.

    Accepts a scalar, a list, a DataVector or a matrix-like object and
    returns a single float (scalar/vector input) or a numpy array
    (matrix input).
    """
    # normalize the input to a pysgpp container
    if isList(x):
        x = DataVector(x)
    elif isNumerical(x):
        x = DataVector([x])
    elif isMatrix(x):
        x = DataMatrix(x)

    # wrap the input in a matrix and allocate the result matrix
    if isinstance(x, DataMatrix):
        inputMatrix = x
        resultMatrix = DataMatrix(inputMatrix.getNrows(), inputMatrix.getNcols())
        resultMatrix.setAll(0.0)
    elif isinstance(x, DataVector):
        inputMatrix = DataMatrix(1, len(x))
        inputMatrix.setRow(0, x)
        resultMatrix = DataMatrix(1, len(x))
        resultMatrix.setAll(0)

    # apply the transformation
    self.dist.cdf(inputMatrix, resultMatrix)

    # unwrap the result to match the input format
    if isNumerical(x) or isinstance(x, DataVector):
        return resultMatrix.get(0, 0)
    elif isinstance(x, DataMatrix):
        return resultMatrix.array()
示例2: __prepareDataContainer
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def __prepareDataContainer(self, data, name):
    """Prepare data for learning.

    @param data: dictionary loaded from UQSetting
    @param name: label attached to every created DataContainer
    @return dictionary {dtype: {t: <DataContainer>}}
    """
    U = self.getParameters()\
        .activeParams()\
        .getIndependentJointDistribution()
    containers = {}
    for dtype in self.getKnowledgeTypes():
        containers[dtype] = {}
        dim = self.grid.getStorage().dim()
        # prepare the data depending on the given knowledge type
        transformed = KnowledgeTypes.transformData(data, U, dtype)
        # build one container per time step
        for t, values in transformed.items():
            numSamples = len(values)
            points = DataMatrix(numSamples, dim)
            results = DataVector(numSamples)
            for i, (sample, res) in enumerate(values.items()):
                points.setRow(i, DataVector(sample.getActiveUnit()))
                results[i] = float(res)
            containers[dtype][t] = DataContainer(points=points, values=results, name=name)
    return containers
示例3: __getSamples
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def __getSamples(self, W, T, n):
    """Return n samples transformed to the unit hypercube.

    @param W: joint distribution with an rvs(n) sampler
    @param T: transformation with probabilisticToUnit
    @param n: number of samples
    @return DataMatrix of shape (n, W.getDim())
    """
    if self.samples is None:
        # draw n samples from the probabilistic space
        # bug fix: the original rebound `ans` to an empty DataMatrix right
        # after drawing, discarding the samples and iterating over
        # uninitialized rows instead
        samples = W.rvs(n)
        # transform them to the unit hypercube
        ans = DataMatrix(n, W.getDim())
        for i, sample in enumerate(samples):
            p = T.probabilisticToUnit(sample)
            ans.setRow(i, DataVector(p))
        return ans
    else:
        if self.samples.shape[0] == n:
            dataSamples = self.samples
        else:
            # resample with replacement to get exactly n rows
            ixs = np.random.randint(0, len(self.samples), n)
            dataSamples = self.samples[ixs, :]
        # check if there are just samples for a subset of the random
        # variables. If so, add the missing ones
        if self.__ixs is not None:
            ans = W.rvs(n)
            # transform the freshly drawn values to the unit hypercube,
            # then overwrite the columns we already have samples for
            for i, sample in enumerate(dataSamples):
                ans[i, :] = T.probabilisticToUnit(ans[i, :])
                ans[i, self.__ixs] = sample
        else:
            ans = dataSamples
        return DataMatrix(ans)
示例4: computeMoments
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def computeMoments(self, ts=None):
    """Collect mean and variance (plus discretization errors) per time step.

    @param ts: optional list of time steps; defaults to all available ones
    @return dict with 'data' (DataMatrix, one row per time step) and
            'names' (column labels)
    """
    names = ['time',
             'iteration',
             'grid_size',
             'mean',
             'meanDiscretizationError',
             'var',
             'varDiscretizationError']
    # bug fix: honor an explicitly given list of time steps instead of
    # unconditionally overwriting the parameter
    if ts is None:
        ts = self.__samples.keys()
    nrows = len(ts)
    ncols = len(names)
    data = DataMatrix(nrows, ncols)
    v = DataVector(ncols)
    for row, t in enumerate(ts):
        v.setAll(0.0)
        v[0] = t
        v[1] = 0
        v[2] = len(self.__samples[t].values())
        v[3], v[4] = self.mean(ts=[t])
        v[5], v[6] = self.var(ts=[t])
        # write results to matrix
        data.setRow(row, v)
    return {'data': data,
            'names': names}
示例5: checkPositivity
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def checkPositivity(grid, alpha):
# define a full grid of maxlevel of the grid
gs = grid.getStorage()
fullGrid = Grid.createLinearGrid(gs.dim())
fullGrid.createGridGenerator().full(gs.getMaxLevel())
fullHashGridStorage = fullGrid.getStorage()
A = DataMatrix(fullHashGridStorage.size(), fullHashGridStorage.dim())
p = DataVector(gs.dim())
for i in xrange(fullHashGridStorage.size()):
fullHashGridStorage.get(i).getCoords(p)
A.setRow(i, p)
res = evalSGFunctionMulti(grid, alpha, A)
ymin, ymax, cnt = 0, -1e10, 0
for i, yi in enumerate(res.array()):
if yi < 0. and abs(yi) > 1e-13:
cnt += 1
ymin = min(ymin, yi)
ymax = max(ymax, yi)
A.getRow(i, p)
print " %s = %g" % (p, yi)
if cnt > 0:
print "warning: function is not positive"
print "%i/%i: [%g, %g]" % (cnt, fullHashGridStorage.size(), ymin, ymax)
return cnt == 0
示例6: merge
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def merge(cls, containerList):
    """Concatenate the points and values of several DataContainers.

    @param containerList: list of DataContainer objects sharing the same
           point dimensionality
    @return new DataContainer with all entries, or None for an empty list
    """
    if len(containerList) == 0:
        return None
    # total number of entries and the (shared) dimensionality
    numEntries = 0
    for container in containerList:
        numEntries += len(container.getValues())
    dim = container.getPoints().getNcols()
    # copy everything row by row into freshly allocated storage
    mergedPoints = DataMatrix(numEntries, dim)
    mergedValues = DataVector(numEntries)
    rowBuffer = DataVector(dim)
    target = 0
    for container in containerList:
        points = container.getPoints()
        values = container.getValues()
        for j in xrange(len(values)):
            points.getRow(j, rowBuffer)
            mergedPoints.setRow(target, rowBuffer)
            mergedValues[target] = values[j]
            target += 1
    # return new DataContainer
    return DataContainer(points=mergedPoints, values=mergedValues)
示例7: ppf
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def ppf(self, x):
    """Evaluate the percent point function (inverse CDF) at x.

    Accepts a scalar, a list, a DataVector or a matrix-like object and
    returns a single float (scalar/vector input) or a numpy array
    (matrix input). Uses the inverse Rosenblatt transformation of the
    underlying KDE distribution.
    """
    # normalize the input to a pysgpp container
    if isList(x):
        x = DataVector(x)
    elif isNumerical(x):
        x = DataVector([x])
    elif isMatrix(x):
        x = DataMatrix(x)

    # wrap the input in a matrix and allocate the result matrix
    if isinstance(x, DataMatrix):
        inputMatrix = x
        resultMatrix = DataMatrix(inputMatrix.getNrows(), inputMatrix.getNcols())
        resultMatrix.setAll(0.0)
    elif isinstance(x, DataVector):
        inputMatrix = DataMatrix(1, len(x))
        inputMatrix.setRow(0, x)
        resultMatrix = DataMatrix(1, len(x))
        resultMatrix.setAll(0)

    # apply the inverse Rosenblatt transformation
    opInvRosen = createOperationInverseRosenblattTransformationKDE(self.dist)
    opInvRosen.doTransformation(inputMatrix, resultMatrix)

    # unwrap the result to match the input format
    if isNumerical(x) or isinstance(x, DataVector):
        return resultMatrix.get(0, 0)
    elif isinstance(x, DataMatrix):
        return resultMatrix.array()
示例8: dehierarchizeOnNewGrid
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def dehierarchizeOnNewGrid(gridResult, grid, alpha):
    """Evaluate the sparse grid function (grid, alpha) at all points of
    gridResult and return the nodal values."""
    gsResult = gridResult.getStorage()
    # collect the coordinates of every point of the target grid
    points = DataMatrix(gsResult.size(), gsResult.dim())
    coords = DataVector(gsResult.dim())
    for i in xrange(gsResult.size()):
        gsResult.get(i).getCoords(coords)
        points.setRow(i, coords)
    # evaluate the sparse grid function at those points
    return evalSGFunctionMulti(grid, alpha, points)
示例9: general_test
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def general_test(self, d, l, bb, xs):
test_desc = "dim=%d, level=%d, len(x)=%s" % (d, l, len(xs))
print test_desc
self.grid = Grid.createLinearGrid(d)
self.grid_gen = self.grid.createGridGenerator()
self.grid_gen.regular(l)
alpha = DataVector([self.get_random_alpha() for i in xrange(self.grid.getSize())])
bb_ = BoundingBox(d)
for d_k in xrange(d):
dimbb = DimensionBoundary()
dimbb.leftBoundary = bb[d_k][0]
dimbb.rightBoundary = bb[d_k][1]
bb_.setBoundary(d_k, dimbb)
# Calculate the expected value without the bounding box
expected_normal = [self.calc_exp_value_normal(x, d, bb, alpha) for x in xs]
#expected_transposed = [self.calc_exp_value_transposed(x, d, bb, alpha) for x in xs]
# Now set the bounding box
self.grid.getStorage().setBoundingBox(bb_)
dm = DataMatrix(len(xs), d)
for k, x in enumerate(xs):
dv = DataVector(x)
dm.setRow(k, dv)
multEval = createOperationMultipleEval(self.grid, dm)
actual_normal = DataVector(len(xs))
#actual_transposed = DataVector(len(xs))
multEval.mult(alpha, actual_normal)
#multEval.mult(alpha, actual_transposed)
actual_normal_list = []
for k in xrange(len(xs)):
actual_normal_list.append(actual_normal.__getitem__(k))
#actual_transposed_list = []
#for k in xrange(len(xs)):
# actual_transposed_list.append(actual_transposed.__getitem__(k))
self.assertAlmostEqual(actual_normal_list, expected_normal)
#self.assertAlmostEqual(actual_tranposed_list, expected_tranposed)
del self.grid
示例10: dehierarchize
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def dehierarchize(grid, alpha):
    """Compute the nodal values of the sparse grid function (grid, alpha)
    at its own grid points."""
    gs = grid.getStorage()
    # coordinates of all grid points, one row each
    coords = DataVector(gs.dim())
    points = DataMatrix(gs.size(), gs.dim())
    for i in xrange(gs.size()):
        gs.get(i).getCoords(coords)
        points.setRow(i, coords)
    # evaluate the function at all points at once
    nodalValues = DataVector(gs.size())
    opEval = createOperationMultipleEval(grid, points)
    opEval.mult(alpha, nodalValues)
    return nodalValues
示例11: loadData
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def loadData(self, name="train"):
    """Read an ARFF file (optionally gzipped) into a DataContainer.

    Parses the @attribute header to determine the dimensionality and
    whether a class column is present, then reads the @data section
    where the class value is the last comma-separated column.

    @param name: category name for the resulting DataContainer
    @return DataContainer with points and class values
    @raise ValueError: if the file has no class attribute or no data rows
    """
    fin = self.__gzOpen(self.filename, "r")
    try:
        data = []
        classes = []
        hasclass = False
        # --- header: @attribute lines up to the @data marker ---
        for line in fin:
            sline = line.strip().lower()
            if sline.startswith("%") or len(sline) == 0:
                continue
            if sline.startswith("@data"):
                break
            if sline.startswith("@attribute"):
                value = sline.split()
                if value[1].startswith("class"):
                    hasclass = True
                else:
                    data.append([])
        # --- body: comma separated rows, class column last ---
        for line in fin:
            sline = line.strip()
            if sline.startswith("%") or len(sline) == 0:
                continue
            values = sline.split(",")
            if hasclass:
                classes.append(float(values[-1]))
                values = values[:-1]
            for i in xrange(len(values)):
                data[i].append(float(values[i]))
    finally:
        # robustness fix: close the file even if parsing raises
        fin.close()
    # robustness fix: fail with a clear message instead of an IndexError
    # below when the file is unusable
    if not hasclass:
        raise ValueError("ARFF file '%s' has no class attribute" % self.filename)
    if len(data) == 0 or len(data[0]) == 0:
        raise ValueError("ARFF file '%s' contains no data rows" % self.filename)
    # transpose the column-wise parse result into pysgpp containers
    dim = len(data)
    size = len(data[0])
    dataMatrix = DataMatrix(size, dim)
    tempVector = DataVector(dim)
    valuesVector = DataVector(size)
    for rowIndex in xrange(size):
        for colIndex in xrange(dim):
            tempVector[colIndex] = data[colIndex][rowIndex]
        dataMatrix.setRow(rowIndex, tempVector)
        valuesVector[rowIndex] = classes[rowIndex]
    return DataContainer(points=dataMatrix, values=valuesVector, name=name, filename=self.filename)
示例12: sampleGrids
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def sampleGrids(self, filename):
    """Write full grid samples, sparse grid points and surpluses for every
    time step of interest to ARFF files."""
    ts = self.__learner.getTimeStepsOfInterest()
    names = self.__params.getNames()
    names.append('f_\\mathcal{I}(x)')
    for t in ts:
        grid, surplus = self.__knowledge.getSparseGridFunction(self._qoi, t)
        gs = grid.getStorage()
        dim = gs.dim()
        # -----------------------------------------
        # evaluate the sparse grid function on a level-4 full grid and
        # append the results as an extra column (via a double transpose)
        # -----------------------------------------
        data = eval_fullGrid(4, dim)
        res = evalSGFunctionMulti(grid, surplus, data)
        data.transpose()
        data.appendRow()
        data.setRow(data.getNrows() - 1, res)
        data.transpose()
        writeDataARFF({'filename': "%s.t%f.samples.arff" % (filename, t),
                       'data': data,
                       'names': names})
        # -----------------------------------------
        # write the sparse grid points themselves
        # -----------------------------------------
        data = DataMatrix(gs.size(), dim)
        data.setAll(0.0)
        for i in xrange(gs.size()):
            gp = gs.get(i)
            coords = np.array([gp.getCoord(j) for j in xrange(dim)])
            data.setRow(i, DataVector(coords))
        writeDataARFF({'filename': "%s.t%f.gridpoints.arff" % (filename, t),
                       'data': data,
                       'names': names})
        # -----------------------------------------
        # write the surplus vector
        # -----------------------------------------
        writeAlphaARFF("%s.t%f.alpha.arff" % (filename, t),
                       surplus)
示例13: getDataSubsetByIndexList
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def getDataSubsetByIndexList(self, indices, name="train"):
    """Extract the rows selected by indices into a new DataContainer.

    @param indices: iterable of row indices into this container
    @param name: category name for the new container
    """
    numRows = len(indices)
    subsetPoints = DataMatrix(numRows, self.dim)
    subsetValues = DataVector(numRows)
    rowBuffer = DataVector(self.dim)
    allPoints = self.getPoints()
    allValues = self.getValues()
    # copy the selected rows one by one
    for target, index in enumerate(indices):
        allPoints.getRow(index, rowBuffer)
        subsetPoints.setRow(target, rowBuffer)
        subsetValues[target] = allValues[index]
    return DataContainer(points=subsetPoints, values=subsetValues, name=name)
示例14: writeSensitivityValues
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def writeSensitivityValues(self, filename):
    """Write Sobol indices and total effects per time step to an ARFF file.

    One row per available time step: [time, total effects..., Sobol
    indices...]. Column names are derived from the active parameter names
    during the first iteration.

    @param filename: prefix of the output file (".sa.stats.arff" appended)
    """
    def keymap(key):
        # map a tuple of parameter indices to a comma separated name list
        names = self.getLearner().getParameters().activeParams().getNames()
        return ",".join([names[i] for i in key])
    # parameters
    ts = self.__knowledge.getAvailableTimeSteps()
    gs = self.__knowledge.getGrid(self._qoi).getStorage()
    n = len(ts)
    n1 = gs.dim()
    n2 = 2 ** n1 - 1
    data = DataMatrix(n, n1 + n2 + 1)
    names = ['time'] + [None] * (n1 + n2)
    for k, t in enumerate(ts):
        # estimated anova decomposition
        anova = self.getAnovaDecomposition(t=t)
        me = anova.getSobolIndices()
        # bug fix: removed a leftover ipdb.set_trace() debugger breakpoint;
        # just adapt to the actual number of indices returned
        if len(me) != n2:
            n2 = len(me)
        te = anova.getTotalEffects()
        n1 = len(te)
        v = DataVector(n1 + n2 + 1)
        v.setAll(0.0)
        v[0] = t
        for i, key in enumerate(anova.getSortedPermutations(te.keys())):
            v[i + 1] = te[key]
            if k == 0:
                names[i + 1] = '"$T_{' + keymap(key) + '}$"'
        for i, key in enumerate(anova.getSortedPermutations(me.keys())):
            v[n1 + i + 1] = me[key]
            if k == 0:
                names[n1 + 1 + i] = '"$S_{' + keymap(key) + '}$"'
        data.setRow(k, v)
    writeDataARFF({'filename': filename + ".sa.stats.arff",
                   'data': data,
                   'names': names})
示例15: eval_fullGrid
# 需要导入模块: from pysgpp import DataMatrix [as 别名]
# 或者: from pysgpp.DataMatrix import setRow [as 别名]
def eval_fullGrid(level, dim, border=True):
    """Return the coordinates of all points of a full grid as a DataMatrix.

    @param level: level of the full grid
    @param dim: dimensionality
    @param border: if True (default), use a grid with boundary points
    @return DataMatrix with one grid point per row
    """
    # choose the grid type depending on whether boundary points are wanted
    if border:
        grid = Grid.createLinearBoundaryGrid(dim)
    else:
        grid = Grid.createLinearGrid(dim)
    grid.createGridGenerator().full(level)
    storage = grid.getStorage()
    # copy the coordinates of every grid point into the result matrix
    coords = DataVector(dim)
    points = DataMatrix(storage.size(), dim)
    for i in xrange(storage.size()):
        storage.get(i).getCoords(coords)
        points.setRow(i, coords)
    return points