This article collects typical usage examples of the numpy.asmatrix function in Python. If you are unsure what exactly asmatrix does or how to use it, the hand-picked code examples below may help.
The following shows 15 code examples of the asmatrix function, sorted by popularity by default.
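Before the examples, a quick orientation: np.asmatrix wraps an array-like object as a numpy.matrix without copying when the input is already an ndarray, so the * operator then means matrix multiplication rather than element-wise multiplication. A minimal sketch (not taken from any of the examples below):

import numpy as np

a = np.array([[1, 2], [3, 4]])
m = np.asmatrix(a)           # matrix view of the same data (no copy for ndarrays)
print(m * m)                 # matrix product, equivalent to a @ a
print(np.multiply(m, m))     # element-wise product
print(m[0].shape)            # (1, 2) -- matrices always stay two-dimensional

Note that NumPy's documentation now recommends plain ndarrays with the @ operator over np.matrix for new code; asmatrix mostly appears in older code bases such as the examples below.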
Example 1: handle_monocular
def handle_monocular(self, msg):
    (image, camera) = msg
    gray = self.mkgray(image)
    C = self.image_corners(gray)
    if C:
        linearity_rms = self.mc.linear_error(C, self.board)
        # Add in reprojection check
        image_points = C
        object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0]
        dist_coeffs = numpy.zeros((4, 1))
        camera_matrix = numpy.array([[camera.P[0], camera.P[1], camera.P[2]],
                                     [camera.P[4], camera.P[5], camera.P[6]],
                                     [camera.P[8], camera.P[9], camera.P[10]]])
        ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs)
        # Convert rotation into a 3x3 Rotation Matrix
        rot3x3, _ = cv2.Rodrigues(rot)
        # Reproject model points into image
        object_points_world = numpy.asmatrix(rot3x3) * (numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans))
        reprojected_h = camera_matrix * object_points_world
        reprojected = (reprojected_h[0:2, :] / reprojected_h[2, :])
        reprojection_errors = image_points.squeeze().T - reprojected.T
        reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.product(reprojection_errors.shape))
        # Print the results
        print("Linearity RMS Error: %.3f Pixels Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms))
    else:
        print('no chessboard')
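The reprojection step above uses np.asmatrix so that * denotes matrix products. A minimal ndarray-based sketch of the same check, using the @ operator (assumed shapes: object_points (N, 1, 3) and image_points (N, 1, 2) as returned by OpenCV, rot3x3 (3, 3), trans (3, 1)):

import numpy as np

def reprojection_rms(object_points, image_points, rot3x3, trans, camera_matrix):
    # Transform the model points into the camera frame: result is (3, N)
    pts_cam = rot3x3 @ object_points.squeeze().T + trans
    # Project with the camera matrix and normalize the homogeneous coordinate
    proj = camera_matrix @ pts_cam
    reprojected = proj[:2, :] / proj[2, :]
    # Per-point pixel errors, then root-mean-square over all coordinates
    errors = image_points.squeeze().T - reprojected
    return np.sqrt(np.mean(errors ** 2))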
Example 2: test_arclength_half_circle
def test_arclength_half_circle():
    """Here we define the tests for the length computation of our ArcLengthParametrizer:
    we try it with half a circle and a fan.
    We test it both in 2D and 3D."""
    # Number of interpolation points minus one
    n = 5
    toll = 1.e-6
    points = np.linspace(0, 1, (n + 1))
    R = 1
    P = 1
    control_points_2d = np.asmatrix(np.zeros([n + 1, 2]))  # [np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    control_points_2d[:, 0] = np.transpose(np.matrix([R * np.cos(1 * i * np.pi / (n + 1)) for i in range(n + 1)]))
    control_points_2d[:, 1] = np.transpose(np.matrix([R * np.sin(1 * i * np.pi / (n + 1)) for i in range(n + 1)]))
    control_points_3d = np.asmatrix(np.zeros([n + 1, 3]))  # [np.array([R*np.cos(5*i * np.pi / (n + 1)), R*np.sin(5*i * np.pi / (n + 1)), P * i]) for i in range(0, n+1)]
    control_points_3d[:, 0] = np.transpose(np.matrix([R * np.cos(1 * i * np.pi / (n + 1)) for i in range(n + 1)]))
    control_points_3d[:, 1] = np.transpose(np.matrix([R * np.sin(1 * i * np.pi / (n + 1)) for i in range(n + 1)]))
    control_points_3d[:, 2] = np.transpose(np.matrix([P * i for i in range(n + 1)]))
    vsl = AffineVectorSpace(UniformLagrangeVectorSpace(n + 1), 0, 1)
    dummy_arky_2d = ArcLengthParametrizer(vsl, control_points_2d)
    dummy_arky_3d = ArcLengthParametrizer(vsl, control_points_3d)
    length2d = dummy_arky_2d.compute_arclength()[-1, 1]
    length3d = dummy_arky_3d.compute_arclength()[-1, 1]
    # print (length2d)
    # print (n * np.sqrt(2))
    l2 = np.pi * R
    l3 = 2 * np.pi * np.sqrt(R * R + (P / (2 * np.pi)) * (P / (2 * np.pi)))
    print(length2d, l2)
    print(length3d, l3)
    assert (length2d - l2) < toll
    assert (length3d - l3) < toll
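The reference values l2 and l3 are the analytic lengths of a half circle (pi * R) and of one full turn of a helix with radius R and pitch P. A quick numeric sanity check of those formulas, independent of the ArcLengthParametrizer class:

import numpy as np

R, P = 1.0, 1.0

t = np.linspace(0, np.pi, 100001)                      # half circle
x, y = R * np.cos(t), R * np.sin(t)
print(np.sum(np.hypot(np.diff(x), np.diff(y))), np.pi * R)

t = np.linspace(0, 2 * np.pi, 100001)                  # one full helix turn
x, y, z = R * np.cos(t), R * np.sin(t), P * t / (2 * np.pi)
helix = np.sum(np.sqrt(np.diff(x)**2 + np.diff(y)**2 + np.diff(z)**2))
print(helix, 2 * np.pi * np.sqrt(R**2 + (P / (2 * np.pi))**2))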
Example 3: fit
def fit(self, bags, y):
    """
    @param bags : a sequence of n bags; each bag is an m-by-k array-like
                  object containing m instances with k features
    @param y : an array-like object of length n containing -1/+1 labels
    """
    self._bags = [np.asmatrix(bag) for bag in bags]
    y = np.asmatrix(y).reshape((-1, 1))
    bs = BagSplitter(self._bags, y)
    if self.verbose:
        print('Training initial sMIL classifier for sbMIL...')
    initial_classifier = sMIL(kernel=self.kernel, C=self.C, p=self.p, gamma=self.gamma,
                              scale_C=self.scale_C, verbose=self.verbose,
                              sv_cutoff=self.sv_cutoff)
    initial_classifier.fit(bags, y)
    if self.verbose:
        print('Computing initial instance labels for sbMIL...')
    f_pos = initial_classifier.predict(bs.pos_inst_as_bags)
    # Select nth largest value as cutoff for positive instances
    n = int(round(bs.L_p * self.eta))
    n = min(bs.L_p, n)
    n = max(bs.X_p, n)
    f_cutoff = sorted((float(f) for f in f_pos), reverse=True)[n - 1]
    # Label all except for n largest as -1
    pos_labels = -np.matrix(np.ones((bs.L_p, 1)))
    pos_labels[np.nonzero(f_pos >= f_cutoff)] = 1.0
    # Train on all instances
    if self.verbose:
        print('Retraining with top %d%% as positive...' % int(100 * self.eta))
    all_labels = np.vstack([-np.ones((bs.L_n, 1)), pos_labels])
    super(SIL, self).fit(bs.instances, all_labels)
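The cutoff logic above keeps roughly the top eta fraction of instance scores as positives before retraining. A toy illustration of just that step, with made-up scores (not the library's API):

import numpy as np

f_pos = np.array([0.9, 0.1, 0.5, 0.7, 0.2])        # hypothetical instance scores
eta, L_p, X_p = 0.4, 5, 1                           # fraction, #instances, #positive bags
n = max(X_p, min(L_p, int(round(L_p * eta))))       # clamp n to [X_p, L_p]
f_cutoff = sorted((float(f) for f in f_pos), reverse=True)[n - 1]
pos_labels = -np.matrix(np.ones((L_p, 1)))
pos_labels[np.nonzero(f_pos >= f_cutoff)] = 1.0
print(pos_labels.T)                                 # the two highest scores get +1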
Example 4: weights
def weights(self, X, Y, res):
    alphas = res.x
    # get weights from valid support vectors - probably should check the math here?
    w1 = 0.0
    w2 = 0.0
    sv_indexes = []
    for i in range(0, len(alphas)):
        if alphas[i] > 1.0e-03:
            w1 += alphas[i] * Y[i] * X[i][0]
            w2 += alphas[i] * Y[i] * X[i][1]
            self.sv_count += 1.0
            sv_indexes.append(i)
    W = [w1, w2]
    self.W = W
    # solve for b, or w0, using any SV
    Wm = np.asmatrix(W)
    try:
        n = sv_indexes[0]
    except IndexError:
        self.no_svs += 1
        return self.fit(X, Y)
    xn = np.asmatrix(X[n])
    xn = xn.getT()
    self.b = (1/Y[n]) - Wm*xn
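For reference, the quantities computed above follow the standard SVM dual relations (with labels $y_i \in \{-1, +1\}$):

$$w = \sum_i \alpha_i y_i x_i, \qquad b = y_n - w^\top x_n \quad \text{for any support vector } x_n,$$

and since $y_n = \pm 1$, $1/y_n = y_n$, which is why the code can write (1/Y[n]) for the bias term.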
Example 5: _randomize
def _randomize():
    # Generate random transition, start and emission probabilities
    # Store observations and states
    num_obs = len(self.observations)
    num_states = len(states)
    # Generate a random list with sum of numbers = 1
    a = np.random.random(num_states)
    a /= a.sum()
    # Initialize start_prob
    self.start_prob = a
    # Initialize transition matrix
    # Fill each row with a list that sums up to 1
    self.trans_prob = np.asmatrix(np.zeros((num_states, num_states)))
    for i in range(num_states):
        a = np.random.random(num_states)
        a /= a.sum()
        self.trans_prob[i, :] = a
    # Initialize emission matrix
    # Fill each row with a list that sums up to 1
    self.em_prob = np.asmatrix(np.zeros((num_states, num_obs)))
    for i in range(num_states):
        a = np.random.random(num_obs)
        a /= a.sum()
        self.em_prob[i, :] = a
    return self.start_prob, self.trans_prob, self.em_prob
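An equivalent, vectorized way to build the same row-stochastic matrices (a standalone sketch, not part of the original class):

import numpy as np

num_states, num_obs = 3, 4
start_prob = np.random.random(num_states)
start_prob /= start_prob.sum()
trans_prob = np.asmatrix(np.random.random((num_states, num_states)))
trans_prob /= trans_prob.sum(axis=1)        # each row sums to 1
em_prob = np.asmatrix(np.random.random((num_states, num_obs)))
em_prob /= em_prob.sum(axis=1)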
Example 6: elop
def elop(X, Y, op):
    """
    Compute element-wise operation of matrix :param:`X` and matrix :param:`Y`.

    :param X: First input matrix.
    :type X: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix`
    :param Y: Second input matrix.
    :type Y: :class:`scipy.sparse` of format csr, csc, coo, bsr, dok, lil, dia or :class:`numpy.matrix`
    :param op: Operation to be performed.
    :type op: `func`
    """
    try:
        zp1 = op(0, 1) if sp.isspmatrix(X) else op(1, 0)
        zp2 = op(0, 0)
        zp = zp1 != 0 or zp2 != 0
    except:
        zp = 0
    if sp.isspmatrix(X) or sp.isspmatrix(Y):
        return _op_spmatrix(X, Y, op) if not zp else _op_matrix(X, Y, op)
    else:
        try:
            X[X == 0] = np.finfo(X.dtype).eps
            Y[Y == 0] = np.finfo(Y.dtype).eps
        except ValueError:
            return op(np.asmatrix(X), np.asmatrix(Y))
        return op(np.asmatrix(X), np.asmatrix(Y))
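A hypothetical call to elop for dense inputs (assuming numpy as np and scipy.sparse as sp are imported, and that the module's _op_spmatrix/_op_matrix helpers exist for the sparse branch):

import numpy as np

A = np.array([[1.0, 0.0], [2.0, 3.0]])
B = np.array([[4.0, 5.0], [0.0, 6.0]])
# Element-wise division; zeros in A and B are first replaced by machine epsilon,
# so no division by zero occurs. Note that A and B are modified in place.
C = elop(A, B, np.divide)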
Example 7: train_map
def train_map(self):
    if len(self.dataset) == 0:
        return
    X = self.dataset.inputs
    Y = self.dataset.targets
    # choose random center vectors from training set
    rnd_idx = np.random.permutation(X.shape[0])[:self.numCenters]
    self.centers = [X[i, :] for i in rnd_idx]
    # calculate activations of RBFs
    G = np.asmatrix(self._designMatrix(X))
    Y = np.asmatrix(Y)
    M = self.numCenters
    # create (reset) prior over weights w
    m0 = np.matrix(np.zeros((M, 1), float))
    S0 = np.matrix(self.alpha * np.eye(M))
    # calculate posterior (p. 153, eqns. 3.50, 3.51)
    self.SN = S0.I + self.beta * G.T * G
    self.mN = np.linalg.inv(self.SN) * (S0.I * m0 + self.beta * G.T * Y)
    self.W = np.asarray(self.mN)
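The two lines cited as Bishop, p. 153, eqns. 3.50 and 3.51 are the posterior mean and precision of Bayesian linear regression with design matrix $\Phi$ (here G), targets $t$ (here Y), prior $\mathcal{N}(m_0, S_0)$ and noise precision $\beta$:

$$m_N = S_N\left(S_0^{-1} m_0 + \beta \Phi^\top t\right), \qquad S_N^{-1} = S_0^{-1} + \beta \Phi^\top \Phi,$$

so the attribute self.SN above holds the posterior precision $S_N^{-1}$, which is why mN is computed with np.linalg.inv(self.SN).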
Example 8: __init__
def __init__(self, submod, V, eps_p_f, ti=None, tally=True, verbose=0):
    self.f_eval = submod.f_eval
    self.f = submod.f
    pm.StepMethod.__init__(self, [self.f, self.f_eval], tally=tally)
    self.children_no_data = copy.copy(self.children)
    if isinstance(eps_p_f, pm.Variable):
        self.children_no_data.discard(eps_p_f)
        self.eps_p_f = eps_p_f
    else:
        for epf in eps_p_f:
            self.children_no_data.discard(epf)
        self.eps_p_f = pm.Lambda("eps_p_f", lambda e=eps_p_f: np.hstack(e), trace=False)
    self.V = pm.Lambda("%s_vect" % V.__name__, lambda V=V: V * np.ones(len(submod.f_eval)))
    self.C_eval = submod.C_eval
    self.M_eval = submod.M_eval
    self.S_eval = submod.S_eval
    M_eval_shape = pm.utils.value(self.M_eval).shape
    C_eval_shape = pm.utils.value(self.C_eval).shape
    self.ti = ti or np.arange(M_eval_shape[0])
    # Work arrays
    self.scratch1 = np.asmatrix(np.empty(C_eval_shape, order="F"))
    self.scratch2 = np.asmatrix(np.empty(C_eval_shape, order="F"))
    self.scratch3 = np.empty(M_eval_shape)
    # Initialize hidden attributes
    self.accepted = 0.0
    self.rejected = 0.0
    self._state = ["rejected", "accepted", "proposal_distribution"]
    self._tuning_info = []
    self.proposal_distribution = None
Example 9: similarity
def similarity(self):
    matrixvectout = numpy.asmatrix(self.vectout)
    # print("matrixvectout shape is ", matrixvectout.shape)
    matrixqvectsout = numpy.asmatrix(self.qvectsout.toarray())
    # print("matrix qvectsout shape is ", matrixqvectsout.shape)
    out = self.bm_vectobj.get_feature_names()
    self.similaritymatrix = numpy.asarray(matrixvectout * matrixqvectsout.T)
Example 10: generate_dataset
def generate_dataset():
    global avg_trans_lis, type2_patient_lis, data_set, patient_ge_ag_lis, trans_dic, patient_smok_rec
    for it in avg_trans_lis:
        item_lis = it[1:6]
        # get the gender and age
        for a_it in patient_ge_ag_lis:
            if it[0] == a_it[0]:
                # gender and age
                item_lis.append(a_it[1])
                item_lis.append(a_it[2])
        # get the number of visiting doctors
        num_vis = len(trans_dic[it[0]])
        item_lis.append(num_vis)
        # get the smoking status record
        item_lis.append(patient_smok_rec[it[0]])
        # get the class tag
        for ite in type2_patient_lis:
            if it[0] == ite[0]:
                item_lis.append(int(ite[1]))
        # only store patients that have a smoking record
        # if int(patient_smok_rec[it[0]]) > 0:
        data_set.append(item_lis)
    # transform the dataset to change the distance from Euclidean distance
    # to Mahalanobis distance
    # get the covariance matrix
    cov_matri = np.cov((np.asmatrix(data_set)[:, [0, 1, 2, 3, 4, 5, 6, 7, 8]]).transpose())
    temp_data_set = (np.dot(np.asmatrix(data_set)[:, [0, 1, 2, 3, 4, 5, 6, 7, 8]], cov_matri)).tolist()
    for it in range(0, len(data_set)):
        item_lis = temp_data_set[it][0:9]
        item_lis.append(data_set[it][9])
        data_set[it] = item_lis
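For reference, the Mahalanobis distance mentioned in the comment above is conventionally defined through the inverse covariance matrix,

$$d_M(x, y) = \sqrt{(x - y)^\top \Sigma^{-1} (x - y)},$$

i.e. a Euclidean distance computed after whitening the data with $\Sigma^{-1/2}$.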
Example 11: test_sum_squares
def test_sum_squares(self):
    X = Variable(5, 4)
    P = np.asmatrix(np.random.randn(3, 5))
    Q = np.asmatrix(np.random.randn(4, 7))
    M = np.asmatrix(np.random.randn(3, 7))
    y = P*X*Q + M
    self.assertFalse(y.is_constant())
    self.assertTrue(y.is_affine())
    self.assertTrue(y.is_quadratic())
    self.assertTrue(y.is_dcp())
    s = sum_squares(y)
    self.assertFalse(s.is_constant())
    self.assertFalse(s.is_affine())
    self.assertTrue(s.is_quadratic())
    self.assertTrue(s.is_dcp())
    # Frobenius norm squared is indeed quadratic
    # but can't show quadraticity using recursive rules
    t = norm(y, 'fro')**2
    self.assertFalse(t.is_constant())
    self.assertFalse(t.is_affine())
    self.assertFalse(t.is_quadratic())
    self.assertTrue(t.is_dcp())
Example 12: GDA_N_D
def GDA_N_D(X, M, cov1, cov2, cov3, classes, priors):
    dclass1 = []
    dclass2 = []
    dclass3 = []
    cov1 = np.asmatrix(cov1, dtype='float')
    cov2 = np.asmatrix(cov2, dtype='float')
    cov3 = np.asmatrix(cov3, dtype='float')
    X = np.asmatrix(X[:, 0:4], dtype='float')
    M = np.asmatrix(M, dtype='float')
    for i in range(0, len(X)):
        x = (X[i] - M[0])
        y = (X[i] - M[1])
        z = (X[i] - M[2])
        dclass1.append(-mth.log(np.linalg.det(cov1)) - 0.5 * (
            np.dot(np.dot(x, np.linalg.inv(cov1)), x.transpose())) + mth.log(priors[0]))
        dclass2.append(-mth.log(np.linalg.det(cov2)) - 0.5 * (
            np.dot(np.dot(y, np.linalg.inv(cov2)), y.transpose())) + mth.log(priors[1]))
        dclass3.append(-mth.log(np.linalg.det(cov3)) - 0.5 * (
            np.dot(np.dot(z, np.linalg.inv(cov3)), z.transpose())) + mth.log(priors[2]))
    predict_class = []
    for i, j, k in zip(dclass1, dclass2, dclass3):
        if i > j and i > k:
            predict_class.append(classes[0])
        elif j > i and j > k:
            predict_class.append(classes[1])
        else:
            predict_class.append(classes[2])
    return predict_class
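The per-class scores above are quadratic discriminant functions; the textbook form, with class mean $\mu_k$, covariance $\Sigma_k$ and prior $\pi_k$, is

$$\delta_k(x) = -\tfrac{1}{2}\log\lvert\Sigma_k\rvert - \tfrac{1}{2}(x-\mu_k)^\top \Sigma_k^{-1} (x-\mu_k) + \log \pi_k,$$

and the predicted class is the one with the largest $\delta_k(x)$.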
Example 13: plot_pr
def plot_pr(self, i0, rg, qmax=5., dmax=200., ax=None):
    """Calculate the p(r) function.
    Use the given i0 and rg values to fill in the low-q part of the gap in the data,
    and truncate the high-q end at qmax.
    """
    if ax is None:
        ax = plt.gca()
    ax.set_xscale('linear')
    ax.set_yscale('linear')
    if self.qgrid[-1] < qmax:
        qmax = self.qgrid[-1]
    tqgrid = np.arange(0, qmax, qmax/len(self.qgrid))
    tint = np.interp(tqgrid, self.qgrid, self.data)
    tint[tqgrid*rg < 1.] = i0*np.exp(-(tqgrid[tqgrid*rg < 1.]*rg)**2/3.)
    #tint -= tint[-10:].sum()/10
    # Hanning window for reducing fringes in p(r)
    tw = np.hanning(2*len(tqgrid)+1)[len(tqgrid):-1]
    tint *= tw
    trgrid = np.arange(0, dmax, 1.)
    kern = np.asmatrix([[rj**2*np.sinc(qi*rj/np.pi) for rj in trgrid] for qi in tqgrid])
    tt = np.asmatrix(tint*tqgrid**2).T
    tpr = np.reshape(np.array((kern.T*tt).T), len(trgrid))
    tpr /= tpr.sum()
    #plt.plot(tqgrid,tint,"g-")
    #tpr = np.fft.rfft(tint)
    #tx = range(len(tpr))
    ax.plot(trgrid, tpr, "g-")
    ax.set_xlabel(r"$r (\AA)$", fontsize='x-large')
    ax.set_ylabel(r"$P(r)$", fontsize='x-large')
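The kern/tt product above is a discretized version of the indirect transform commonly used for small-angle scattering data: up to normalization,

$$p(r) \propto r^2 \int_0^{q_{\max}} I(q)\, q^2\, \frac{\sin(qr)}{qr}\, dq,$$

where np.sinc(qi*rj/np.pi) supplies the sin(qr)/(qr) factor, because NumPy's sinc is the normalized one, sinc(x) = sin(pi x)/(pi x).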
Example 14: active
def active(self, X):
    pre_h = np.zeros((1, self.h_size), dtype=theano.config.floatX)
    [R, Z, GH, H] = self.cell.active(X, pre_h)
    self.activation = np.asmatrix(H)
    self.R = np.asmatrix(R)
    self.Z = np.asmatrix(Z)
    self.GH = np.asmatrix(GH)
Example 15: _alpha_cal
def _alpha_cal(self, observations):
    # Calculate alpha matrix and return it
    num_states = self.em_prob.shape[0]
    total_stages = len(observations)
    # Initialize values
    ob_ind = self.obs_map[observations[0]]
    alpha = np.asmatrix(np.zeros((num_states, total_stages)))
    c_scale = np.asmatrix(np.zeros((total_stages, 1)))
    # Handle alpha base case
    alpha[:, 0] = np.multiply(np.transpose(self.em_prob[:, ob_ind]), self.start_prob).transpose()
    # Store scaling factors, scale alpha
    c_scale[0, 0] = 1/np.sum(alpha[:, 0])
    alpha[:, 0] = alpha[:, 0] * c_scale[0]
    # Iteratively calculate alpha(t) for all t
    for curr_t in range(1, total_stages):
        ob_ind = self.obs_map[observations[curr_t]]
        alpha[:, curr_t] = np.dot(alpha[:, curr_t-1].transpose(), self.trans_prob).transpose()
        alpha[:, curr_t] = np.multiply(alpha[:, curr_t].transpose(), np.transpose(self.em_prob[:, ob_ind])).transpose()
        # Store scaling factors, scale alpha
        c_scale[curr_t] = 1/np.sum(alpha[:, curr_t])
        alpha[:, curr_t] = alpha[:, curr_t] * c_scale[curr_t]
    # Return the computed alpha and the scaling factors
    return (alpha, c_scale)
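The loop above is the scaled forward algorithm for an HMM with transition matrix A (trans_prob) and emission matrix B (em_prob): for each time step,

$$\hat{\alpha}_t(j) = c_t \Big(\sum_i \hat{\alpha}_{t-1}(i)\, a_{ij}\Big)\, b_j(o_t), \qquad c_t = \Big(\sum_j \alpha_t(j)\Big)^{-1},$$

where the scaling factors $c_t$ keep the values within floating-point range; the log-likelihood of the observation sequence can be recovered as $-\sum_t \log c_t$.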