This article collects typical usage examples of the numpy.array function in Python. If you have been wondering what numpy.array does in practice and how to use it, the curated examples below may help.
Fifteen code examples of the array function are shown below, sorted by popularity by default.
Example 1: test_partial_dependence_helpers
def test_partial_dependence_helpers(est, method, target_feature):
    # Check that what is returned by _partial_dependence_brute or
    # _partial_dependence_recursion is equivalent to manually setting a target
    # feature to a given value, and computing the average prediction over all
    # samples.
    # This also checks that the brute and recursion methods give the same
    # output.

    X, y = make_regression(random_state=0)
    # The 'init' estimator for GBDT (here the average prediction) isn't taken
    # into account with the recursion method, for technical reasons. We set
    # the mean to 0 so that this 'bug' doesn't have any effect.
    y = y - y.mean()
    est.fit(X, y)

    # target feature will be set to .5 and then to 123
    features = np.array([target_feature], dtype=np.int32)
    grid = np.array([[.5],
                     [123]])

    if method == 'brute':
        pdp = _partial_dependence_brute(est, grid, features, X,
                                        response_method='auto')
    else:
        pdp = _partial_dependence_recursion(est, grid, features)

    mean_predictions = []
    for val in (.5, 123):
        X_ = X.copy()
        X_[:, target_feature] = val
        mean_predictions.append(est.predict(X_).mean())

    pdp = pdp[0]  # (shape is (1, 2) so make it (2,))
    assert_allclose(pdp, mean_predictions, atol=1e-3)
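The np.array pattern on display: an index array with an explicit dtype, and a small 2-D grid built from nested lists. A minimal standalone sketch of the same idiom (variable names are illustrative, not taken from the test):

import numpy as np

features = np.array([2], dtype=np.int32)   # index array with a fixed integer dtype
grid = np.array([[0.5],
                 [123]])                   # shape (2, 1): two grid values, one column
print(features.dtype, grid.shape)          # int32 (2, 1)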
Example 2: get_tracedata
def get_tracedata(self, format='AmpPha', single=False):
    '''
    Get the data of the current trace

    Input:
        format (string) : 'AmpPha': Amp in dB and Phase, 'RealImag'

    Output:
        'AmpPha': Amplitude and Phase
    '''
    # data = self._visainstrument.ask_for_values(':FORMAT REAL,32;*CLS;CALC1:DATA:NSW? SDAT,1;*OPC', format=1)
    data = self._visainstrument.ask_for_values('FORM:DATA REAL; FORM:BORD SWAPPED; CALC%i:SEL:DATA:SDAT?' % (self._ci), format=visa.double)

    data_size = numpy.size(data)
    datareal = numpy.array(data[0:data_size:2])
    dataimag = numpy.array(data[1:data_size:2])

    if format.upper() == 'REALIMAG':
        if self._zerospan:
            return numpy.mean(datareal), numpy.mean(dataimag)
        else:
            return datareal, dataimag
    elif format.upper() == 'AMPPHA':
        if self._zerospan:
            datareal = numpy.mean(datareal)
            dataimag = numpy.mean(dataimag)
            dataamp = numpy.sqrt(datareal*datareal + dataimag*dataimag)
            # arctan2 keeps the quadrant, unlike the plain arctan(imag/real)
            datapha = numpy.arctan2(dataimag, datareal)
            return dataamp, datapha
        else:
            dataamp = numpy.sqrt(datareal*datareal + dataimag*dataimag)
            datapha = numpy.arctan2(dataimag, datareal)
            return dataamp, datapha
    else:
        raise ValueError('get_tracedata(): Format must be AmpPha or RealImag')
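The core numpy.array usage here is splitting an interleaved real/imaginary response into two arrays with strided slices. A minimal sketch, with a plain Python list standing in for the VISA instrument data:

import numpy as np

data = [1.0, 0.5, 0.8, -0.2, 0.3, 0.9]   # interleaved re, im, re, im, ...
datareal = np.array(data[0::2])
dataimag = np.array(data[1::2])
amp = np.sqrt(datareal**2 + dataimag**2)
pha = np.arctan2(dataimag, datareal)      # quadrant-correct phase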
Example 3: test_RadiusNeighborsRegressor_multioutput_with_uniform_weight
def test_RadiusNeighborsRegressor_multioutput_with_uniform_weight():
    """Test radius neighbors in multi-output regression (uniform weight)"""
    rng = check_random_state(0)
    n_features = 5
    n_samples = 40
    n_output = 4

    X = rng.rand(n_samples, n_features)
    y = rng.rand(n_samples, n_output)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

    for algorithm, weights in product(ALGORITHMS, [None, 'uniform']):
        rnn = neighbors.RadiusNeighborsRegressor(weights=weights,
                                                 algorithm=algorithm)
        rnn.fit(X_train, y_train)

        neigh_idx = rnn.radius_neighbors(X_test, return_distance=False)
        y_pred_idx = np.array([np.mean(y_train[idx], axis=0)
                               for idx in neigh_idx])
        y_pred_idx = np.array(y_pred_idx)
        y_pred = rnn.predict(X_test)

        assert_equal(y_pred_idx.shape, y_test.shape)
        assert_equal(y_pred.shape, y_test.shape)
        assert_array_almost_equal(y_pred, y_pred_idx)
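The idiom to note is building a 2-D prediction array from a list comprehension of per-neighborhood means. A self-contained sketch with toy data:

import numpy as np

y_train = np.arange(12.0).reshape(4, 3)           # 4 samples, 3 outputs
neigh_idx = [np.array([0, 2]), np.array([1, 3])]  # neighbors per query point
y_pred = np.array([np.mean(y_train[idx], axis=0) for idx in neigh_idx])
print(y_pred.shape)                               # (2, 3)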
Example 4: test_continuum_seismicity
def test_continuum_seismicity(self):
    '''
    Tests the function hmtk.strain.shift.Shift.continuum_seismicity -
    the python implementation of the Subroutine Continuum Seismicity from
    the Fortran 90 code GSRM.f90
    '''
    self.strain_model = GeodeticStrain()
    # Define a simple strain model
    test_data = {'longitude': np.zeros(3, dtype=float),
                 'latitude': np.zeros(3, dtype=float),
                 'exx': np.array([1E-9, 1E-8, 1E-7]),
                 'eyy': np.array([5E-10, 5E-9, 5E-8]),
                 'exy': np.array([2E-9, 2E-8, 2E-7])}
    self.strain_model.get_secondary_strain_data(test_data)
    self.model = Shift([5.66, 6.66])
    threshold_moment = moment_function(np.array([5.66, 6.66]))
    expected_rate = np.array([[-14.43624419, -22.48168502],
                              [-13.43624419, -21.48168502],
                              [-12.43624419, -20.48168502]])
    np.testing.assert_array_almost_equal(
        expected_rate,
        np.log10(self.model.continuum_seismicity(
            threshold_moment,
            self.strain_model.data['e1h'],
            self.strain_model.data['e2h'],
            self.strain_model.data['err'],
            BIRD_GLOBAL_PARAMETERS['OSRnor'])))
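Beyond the array literals in scientific notation, the example relies on np.testing.assert_array_almost_equal, which compares element-wise to a given number of decimals. A tiny sketch of that comparison idiom with made-up numbers:

import numpy as np

expected = np.array([1E-9, 1E-8, 1E-7])
computed = expected * (1 + 1e-9)          # tiny perturbation
np.testing.assert_array_almost_equal(np.log10(expected), np.log10(computed))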
Example 5: testCNCS
def testCNCS(self):
    # CNCS_7860 is not an incoherent scatterer but for this test
    # it doesn't matter
    SA, Flux = MDNormSCDPreprocessIncoherent(Filename='CNCS_7860',
                                             MomentumMin=1,
                                             MomentumMax=1.5)

    # Just compare 10 points of the Flux
    flux_cmp = np.array([0.00000000e+00, 7.74945234e-04, 4.96143098e-03,
                         1.18914010e-02, 1.18049991e-01, 7.71872176e-01,
                         9.93078957e-01, 9.96312349e-01, 9.98450129e-01,
                         1.00000002e+00])
    np.testing.assert_allclose(Flux.extractY()[0][::1000], flux_cmp)
    self.assertEqual(Flux.getXDimension().name, 'Momentum')
    self.assertEqual(Flux.getXDimension().getUnits(), 'Angstrom^-1')
    self.assertEqual(Flux.blocksize(), 10000)
    self.assertEqual(Flux.getNumberHistograms(), 1)

    # Compare every 20-th bin of row 64
    SA_cmp = np.array([0.11338311, 0.18897185, 0.15117748, 0.11338311, 0.03779437,
                       0.07558874, 0.15117748, 0.18897185, 0.03779437, 0.15117748,
                       0.11338311, 0.07558874, 0.03779437, 0.,         0.56691555,
                       0.26456059, 0.11338311, 0.07558874, 0.11338311, 0.])
    np.testing.assert_allclose(SA.extractY().reshape((-1, 128))[::20, 64], SA_cmp)
    self.assertEqual(SA.getXDimension().name, 'Momentum')
    self.assertEqual(SA.getXDimension().getUnits(), 'Angstrom^-1')
    self.assertEqual(SA.blocksize(), 1)
    self.assertEqual(SA.getNumberHistograms(), 51200)
    self.assertEqual(SA.getNEvents(), 51200)
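The spot-checking idiom, reshape((-1, 128)) followed by strided slicing, is worth isolating: -1 lets numpy infer the row count from the data size. A small sketch with toy data:

import numpy as np

y = np.arange(512.0)            # pretend detector output
rows = y.reshape((-1, 128))     # -1 infers the row count (here 4)
sample = rows[::2, 64]          # every 2nd row of column 64
np.testing.assert_allclose(sample, np.array([64.0, 320.0]))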
Example 6: __init__
def __init__(self, data, p, q, formula):
    # Initialize TSM object
    super(EGARCHMReg, self).__init__('EGARCHMReg')

    # Latent variables
    self.p = p
    self.q = q
    self.max_lag = max(self.p, self.q)
    self.z_no = self.p + self.q + 2
    self._z_hide = 0  # Whether to cutoff variance latent variables from results
    self.supported_methods = ["MLE", "PML", "Laplace", "M-H", "BBVI"]
    self.default_method = "MLE"
    self.multivariate_model = False
    self.leverage = False
    self.model_name = "EGARCHMReg(" + str(self.p) + "," + str(self.q) + ")"

    # Format the data
    self.is_pandas = True  # This is compulsory for this model type
    self.data_original = data
    self.formula = formula
    self.y, self.X = dmatrices(formula, data)
    self.z_no += self.X.shape[1]*2
    self.y_name = self.y.design_info.describe()
    self.data_name = self.y_name
    self.X_names = self.X.design_info.describe().split(" + ")
    self.y = np.array([self.y]).ravel()
    self.data = self.y
    self.X = np.array([self.X])[0]
    self.index = data.index

    self.initial_values = np.zeros(self.z_no)
    self._create_latent_variables()
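The np.array call to note is np.array([self.y]).ravel(), which flattens patsy's (n, 1) response matrix into a 1-D vector and copies it out of the patsy object. A minimal sketch with an ordinary 2-D array standing in for the DesignMatrix:

import numpy as np

y_col = np.array([[1.0], [2.0], [3.0]])   # column vector, shape (3, 1)
y = np.array([y_col]).ravel()             # flatten to shape (3,)
print(y.shape)                            # (3,)

y_col.ravel() alone gives the same shape; the extra np.array([...]) also guarantees a fresh copy.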
Example 7: vertex_transform1
def vertex_transform1(vertex):
    """
    This transform was applied on the original surface.
    """
    return np.dot(rotation_matrix(np.array([0.0, 0.0, 1.0]), math.pi),
                  np.dot(rotation_matrix(np.array([1.0, 0.0, 0.0]), -math.pi / 1.6),
                         np.array([float(x) / 1.5 for x in vertex[:3]]) + np.array([0.0, -40.0, 20.0])))
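rotation_matrix here is a project-specific helper (axis, angle); the numpy side of the idiom is chaining np.dot over 3-vectors built with np.array. A sketch with an explicit z-axis rotation in place of that helper:

import math
import numpy as np

def rot_z(theta):
    c, s = math.cos(theta), math.sin(theta)
    return np.array([[c, -s, 0.0],
                     [s,  c, 0.0],
                     [0.0, 0.0, 1.0]])

vertex = np.array([1.0, 0.0, 0.0])
moved = np.dot(rot_z(math.pi), vertex / 1.5 + np.array([0.0, -40.0, 20.0]))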
Example 8: init
def init():
    global theMesh, theLight, theCamera, \
           theScreen, resolution
    initializeVAO()
    glEnable(GL_CULL_FACE)
    glEnable(GL_DEPTH_TEST)

    # Add our object
    # LIGHT
    theLight = N.array((-0.577, 0.577, 0.577, 0.0), dtype=N.float32)
    # OBJECT
    phongshader = makeShader("phongshader.vert", "phongshader.frag")
    verts, elements = readOBJ("suzanne.obj")
    suzanneVerts = getArrayBuffer(verts)
    suzanneElements = getElementBuffer(elements)
    suzanneNum = len(elements)
    theMesh = coloredMesh(N.array((1.0, 0.5, 1.0, 1.0), dtype=N.float32),
                          suzanneVerts,
                          suzanneElements,
                          suzanneNum,
                          phongshader)

    # CAMERA
    width, height = theScreen.get_size()
    aspectRatio = float(width)/float(height)
    near = 0.01
    far = 100.0
    lens = 4.0  # "longer" lenses mean more telephoto
    theCamera = Camera(lens, near, far, aspectRatio)
    theCamera.moveBack(6)
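The recurring pattern in OpenGL code is np.array(..., dtype=float32): GL buffers and uniforms expect 32-bit floats, while numpy defaults to float64. A minimal sketch:

import numpy as np

light_dir = np.array((-0.577, 0.577, 0.577, 0.0), dtype=np.float32)
color = np.array((1.0, 0.5, 1.0, 1.0), dtype=np.float32)
print(light_dir.dtype)   # float32, as the GL driver expects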
Example 9: calculate_user_similarity
def calculate_user_similarity(user_rating_dict, user_list, restaurant_list, score_matrix, user_mean):
    similarity_matrix = []
    for row in range(len(user_list)):
        # Pad the lower-triangle positions so every row has the same length;
        # they are filled in by the mirroring loops below.
        similarity_vector = [0.0] * row
        list1 = list(user_rating_dict[user_list[row]].keys())
        mean1 = user_mean[row]
        for col in range(row, len(user_list)):
            list2 = list(user_rating_dict[user_list[col]].keys())
            mean2 = user_mean[col]
            join_list = list(set(list1 + list2))
            rating_vector1 = []
            rating_vector2 = []
            for item in join_list:
                if item in list1:
                    rating_vector1.append(user_rating_dict[user_list[row]][item] - mean1)
                else:
                    rating_vector1.append(score_matrix[row, restaurant_list.index(item)] - mean1)
                if item in list2:
                    rating_vector2.append(user_rating_dict[user_list[col]][item] - mean2)
                else:
                    rating_vector2.append(score_matrix[col, restaurant_list.index(item)] - mean2)
            similarity = numpy.sum(numpy.array(rating_vector1) * numpy.array(rating_vector2)) / \
                sqrt(numpy.sum(numpy.square(rating_vector1)) * numpy.sum(numpy.square(rating_vector2)))
            similarity_vector.append(similarity)
        similarity_matrix.append(similarity_vector)
    similarity_matrix = numpy.array(similarity_matrix)
    # Mirror the computed upper triangle into the lower triangle
    for col in range(len(user_list)):
        for row in range(col, len(user_list)):
            similarity_matrix[row, col] = similarity_matrix[col, row]
    return similarity_matrix
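The numpy part of each inner iteration is a cosine similarity between two mean-centered rating vectors. That step in isolation, with toy vectors:

import numpy as np

v1 = np.array([1.0, -0.5, 0.5])   # mean-centered ratings, user 1
v2 = np.array([0.5, -1.0, 1.0])   # mean-centered ratings, user 2
cos_sim = np.sum(v1 * v2) / np.sqrt(np.sum(np.square(v1)) * np.sum(np.square(v2)))
# equivalently: np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))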
Example 10: test_array_richcompare_legacy_weirdness
def test_array_richcompare_legacy_weirdness(self):
    # It doesn't really work to use assert_deprecated here, b/c part of
    # the point of assert_deprecated is to check that when warnings are
    # set to "error" mode then the error is propagated -- which is good!
    # But here we are testing a bunch of code that is deprecated *because*
    # it has the habit of swallowing up errors and converting them into
    # different warnings. So assert_warns will have to be sufficient.
    assert_warns(FutureWarning, lambda: np.arange(2) == "a")
    assert_warns(FutureWarning, lambda: np.arange(2) != "a")

    # No warning for scalar comparisons
    with warnings.catch_warnings():
        warnings.filterwarnings("error")
        assert_(not (np.array(0) == "a"))
        assert_(np.array(0) != "a")
        assert_(not (np.int16(0) == "a"))
        assert_(np.int16(0) != "a")

    for arg1 in [np.asarray(0), np.int16(0)]:
        struct = np.zeros(2, dtype="i4,i4")
        for arg2 in [struct, "a"]:
            for f in [operator.lt, operator.le, operator.gt, operator.ge]:
                if sys.version_info[0] >= 3:
                    # py3: record=True so `l` actually collects warnings
                    with warnings.catch_warnings(record=True) as l:
                        warnings.filterwarnings("always")
                        assert_raises(TypeError, f, arg1, arg2)
                        assert_(not l)
                else:
                    # py2
                    assert_warns(DeprecationWarning, f, arg1, arg2)
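np.array(0) creates a zero-dimensional array, which is exactly the array flavor whose comparison semantics this test probes. A quick sketch of what a 0-d array looks like:

import numpy as np

z = np.array(0)          # 0-d array: a scalar wrapped in array machinery
print(z.shape, z.ndim)   # () 0
print(z == 0)            # True for numeric operands; strings are the odd case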
Example 11: test
def test(self):
    with self.test_session() as sess:
        m = tf.constant(np.array([
            [1.0, 2.0],
            [2.0, 0.0]
        ], dtype=np.float32))
        l = linear(m, 4)
        result = sess.run(l, {
            'SimpleLinear/Matrix:0': np.array([
                [1.0, 2.0],
                [1.0, 2.0],
                [1.0, 2.0],
                [1.0, 2.0],
            ]),
            'SimpleLinear/Bias:0': np.array([
                0.0,
                1.0,
                2.0,
                3.0,
            ]),
        })
        self.assertAllClose(result, np.array([
            [5.0, 6.0, 7.0, 8.0],
            [2.0, 3.0, 4.0, 5.0],
        ]))
        print(result)
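The expected values imply that linear computes x @ W.T + b with the fed (4, 2) matrix; under that assumption the same arithmetic can be checked in plain numpy:

import numpy as np

m = np.array([[1.0, 2.0],
              [2.0, 0.0]], dtype=np.float32)
W = np.array([[1.0, 2.0]] * 4)    # (4, 2), as fed to 'SimpleLinear/Matrix:0'
b = np.array([0.0, 1.0, 2.0, 3.0])
out = m @ W.T + b
assert np.allclose(out, [[5.0, 6.0, 7.0, 8.0],
                         [2.0, 3.0, 4.0, 5.0]])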
Example 12: loadTrajectoryData
def loadTrajectoryData(inFile=UJILocDataFile):
    with open(inFile, 'r') as dataFile:
        data = dataFile.read()

    # 9-axis IMU data
    # trajectory: dictionary with three elements
    # N is number of samples in the trajectory (data taken at 10Hz)
    # mag: Nx3 numpy array where each line has XYZ mag data
    # gyro: Nx3 numpy array where each line has XYZ gyro vel data
    # accel: Nx3 numpy array where each line has XYZ lin accelerometer data
    segments = data.split("<", 2)
    IMUDataStr = segments[0].split('\n')[:-1]
    magArr = []
    oriArr = []
    accelArr = []
    for i, lineStr in enumerate(IMUDataStr):
        lineStr = lineStr.split(' ', 10)[:-1]
        lineStr = [float(x) for x in lineStr]
        magArr.append(lineStr[1:4])    # xyz mag data for sample
        accelArr.append(lineStr[4:7])  # xyz accelerometer data for single samp
        oriArr.append(lineStr[7:10])   # xyz gyro data for sample

    # values initially are given as euler angles, which are not good for
    # imu-type calculations, so we convert them
    gyroArr = rawSensorStateProc.orientationToGyro(oriArr)
    initOrientationMatrix = rawSensorStateProc.calcInitialOrientation(oriArr[0])
    # IMUData = [{'mag': magArr, 'gyro': gyroArr, 'accel': accelArr}]

    # process waypoint data: each waypoint consists of a latitude coordinate,
    # a longitude coordinate, and an index (which IMU datapoint it represents)
    waypoints = []
    waypointStr = segments[1].split(">", 2)
    numWaypoints = int(waypointStr[0])
    waypointLns = waypointStr[1].lstrip().split('\n')
    for i, lineStr in enumerate(waypointLns):
        line = lineStr.split(' ', WAYPOINTS_ELEMS_PER_LINE)
        line = [float(x) for x in line]
        if i == 0:
            waypoints.append({'lat': line[0], 'long': line[1], 'index': line[4]})
        waypoints.append({'lat': line[2], 'long': line[3], 'index': line[5]})
        seqLen = line[5]

    traj = {'waypoints': np.array(waypoints), 'mag': np.array(magArr), 'gyro': np.array(gyroArr),
            'accel': np.array(accelArr), 'orientSensed': np.array(oriArr),
            'initOrient': initOrientationMatrix, 'seqLen': seqLen}
    return traj
# loadTrajectoryData()
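The closing np.array calls convert lists accumulated row by row into N×3 arrays in one shot, which is both simpler and faster than growing a numpy array incrementally. Sketch:

import numpy as np

magArr = []
for sample in ([0.1, 0.2, 0.3], [0.4, 0.5, 0.6]):   # pretend parsed lines
    magArr.append(sample)
mag = np.array(magArr)   # shape (2, 3): one row per sample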
Example 13: test_2d_complex_same
def test_2d_complex_same(self):
    a = array([[1+2j, 3+4j, 5+6j], [2+1j, 4+3j, 6+5j]])
    c = signal.fftconvolve(a, a)
    d = array([[-3+4j, -10+20j, -21+56j, -18+76j, -11+60j],
               [10j, 44j, 118j, 156j, 122j],
               [3+4j, 10+20j, 21+56j, 18+76j, 11+60j]])
    assert_array_almost_equal(c, d)
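np.array infers a complex dtype as soon as the nested lists contain complex literals; .real and .imag then give component views without copying. Sketch:

import numpy as np

a = np.array([[1+2j, 3+4j], [2+1j, 4+3j]])
print(a.dtype)           # complex128
print(a.real, a.imag)    # component views, no copy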
Example 14: setUp
def setUp(self):
    x = numpy.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                     8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                     3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                     6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                     7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                     7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
    X = x.reshape(6, 6)
    XX = x.reshape(3, 2, 2, 3)

    m = numpy.array([0, 1, 0, 1, 0, 0,
                     1, 0, 1, 1, 0, 1,
                     0, 0, 0, 1, 0, 1,
                     0, 0, 0, 1, 1, 1,
                     1, 0, 0, 1, 0, 0,
                     0, 0, 1, 0, 1, 0])
    mx = array(data=x, mask=m)
    mX = array(data=X, mask=m.reshape(X.shape))
    mXX = array(data=XX, mask=m.reshape(XX.shape))

    m2 = numpy.array([1, 1, 0, 1, 0, 0,
                      1, 1, 1, 1, 0, 1,
                      0, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 1, 0,
                      0, 0, 1, 0, 1, 1])
    m2x = array(data=x, mask=m2)
    m2X = array(data=X, mask=m2.reshape(X.shape))
    m2XX = array(data=XX, mask=m2.reshape(XX.shape))
    self.d = (x, X, XX, m, mx, mX, mXX)
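Note that array here is numpy.ma.array (masked array), not the plain numpy.array: the mask argument flags entries as invalid so that reductions skip them. A minimal sketch:

import numpy as np
import numpy.ma as ma

x = np.array([1.0, 2.0, 3.0, 4.0])
mx = ma.array(data=x, mask=[0, 1, 0, 1])   # 1 = masked/invalid
print(mx.mean())                           # 2.0: masked entries are ignored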
Example 15: lars_regression_noise_ipyparallel
def lars_regression_noise_ipyparallel(pars):
    import numpy as np
    import os
    import sys
    import time
    import gc

    Y_name, C_name, noise_sn, idxs_C, idxs_Y = pars
    Y = np.load(Y_name, mmap_mode='r')
    Y = np.array(Y[idxs_Y, :])
    C = np.load(C_name, mmap_mode='r')
    C = np.array(C)
    _, T = np.shape(C)
    # sys.stdout = open(str(os.getpid()) + ".out", "w")
    st = time.time()
    As = []
    # print "*****************:" + str(idxs_Y[0]) + ',' + str(idxs_Y[-1])
    sys.stdout.flush()
    for y, px in zip(Y, idxs_Y):
        # print str(time.time()-st) + ": Pixel" + str(px)
        sys.stdout.flush()
        c = C[idxs_C[px], :]
        if np.size(c) > 0:
            sn = noise_sn[px]**2 * T
            _, _, a, _, _ = lars_regression_noise(y, c.T, 1, sn)
            if not np.isscalar(a):
                a = a.T
            As.append((px, idxs_C[px], a))
    del Y
    del C
    gc.collect()
    return As
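np.array(Y[idxs_Y, :]) is doing real work here: np.load(..., mmap_mode='r') returns a read-only memory map, and wrapping the selected rows in np.array copies just those rows into an ordinary in-memory ndarray, detached from the file. Sketch:

import numpy as np

np.save('Y.npy', np.arange(20.0).reshape(4, 5))
Y_mm = np.load('Y.npy', mmap_mode='r')    # memory-mapped, data stays on disk
rows = np.array(Y_mm[[0, 2], :])          # copy only rows 0 and 2 into RAM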