This article collects typical usage examples of the numpy.column_stack function in Python. If you are wondering exactly what column_stack does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
Fifteen column_stack code examples are shown below, sorted by popularity by default. You can upvote the ones you like or find useful; your votes help the system recommend better Python examples.
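As a quick refresher before the examples: np.column_stack takes a sequence of 1-D (or 2-D) arrays and stacks them as the columns of a single 2-D array. A minimal sketch:

import numpy as np

a = np.array([1, 2, 3])
b = np.array([4, 5, 6])
print(np.column_stack((a, b)))
# [[1 4]
#  [2 5]
#  [3 6]]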
Example 1: center_galaxy

import numpy as np
from scipy.ndimage import median_filter

def center_galaxy(image, original_image, centroid=None):
    if centroid is None:
        # apply median filter to find the galaxy centroid
        centroid = median_filter(image, size=10).argmax()
        centroid = np.unravel_index(centroid, image.shape)
    # recenter image (// keeps the offsets integral under Python 3)
    roffset = centroid[0] - image.shape[0] // 2
    if roffset < 0:
        # add more white space to top of image
        extra_rows = image.shape[0] - 2 * centroid[0]
        image = np.vstack((np.zeros((extra_rows, image.shape[1])), image))
    elif roffset > 0:
        # add more white space to bottom of image
        extra_rows = 2 * centroid[0] - image.shape[0]
        image = np.vstack((image, np.zeros((extra_rows, image.shape[1]))))
    coffset = centroid[1] - image.shape[1] // 2
    if coffset > 0:
        # add more white space to right of image
        extra_columns = 2 * centroid[1] - image.shape[1]
        image = np.column_stack((image, np.zeros((image.shape[0], extra_columns))))
    elif coffset < 0:
        # add more white space to left of image
        extra_columns = image.shape[1] - 2 * centroid[1]
        image = np.column_stack((np.zeros((image.shape[0], extra_columns)), image))
    return image, centroid
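A hypothetical call, assuming the function and imports above and a synthetic image with an off-center bright region (the blob must be large enough to survive the size-10 median filter):

img = np.zeros((64, 64))
img[30:50, 5:25] = 1.0  # off-center 20x20 "galaxy"
centered, centroid = center_galaxy(img, img)
# `centered` is zero-padded so the detected centroid moves toward the middle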
Example 2: expected

def expected(self, window_length, k, closes):
    """Compute the expected data (without adjustments) for the given
    window, k, and closes array.

    This uses talib.BBANDS to generate the expected data.
    """
    lower_cols = []
    middle_cols = []
    upper_cols = []
    for n in range(self.nassets):
        close_col = closes[:, n]
        if np.isnan(close_col).all():
            # ta-lib doesn't deal well with all nans.
            upper, middle, lower = [np.full(self.ndays, np.nan)] * 3
        else:
            upper, middle, lower = talib.BBANDS(
                close_col,
                window_length,
                k,
                k,
            )
        upper_cols.append(upper)
        middle_cols.append(middle)
        lower_cols.append(lower)

    # Stack all of our uppers, middles, lowers into three 2d arrays
    # whose columns are the sids. After that, slice off only the
    # rows we care about.
    where = np.s_[window_length - 1:]
    uppers = np.column_stack(upper_cols)[where]
    middles = np.column_stack(middle_cols)[where]
    lowers = np.column_stack(lower_cols)[where]
    return uppers, middles, lowers
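np.s_ used at the end is just a convenient way to build a slice object once and reuse it across the three stacked arrays; in isolation:

import numpy as np

where = np.s_[2:]      # equivalent to slice(2, None)
print(np.arange(5)[where])
# [2 3 4]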
Example 3: writeOut

def writeOut(self, outname='', include_state=False):
    """
    Write the simulation state out to file. Only primitive variables
    for now: rho, u, P (e and h could be tacked on later). For
    debugging purposes this just writes plain ASCII, to keep the
    focus on the solver rather than on fancy output.
    """
    x = self.grid.center()
    rho = self.getPrimitive('Density')
    u = self.getPrimitive('Velocity')
    P = self.getPrimitive('Pressure')

    if include_state:
        data = np.column_stack((x, rho, u, P, self.q[0], self.q[1], self.q[2]))
        header = '# x Density Velocity Pressure q0 q1 q2'
    else:
        data = np.column_stack((x, rho, u, P))
        header = '# x Density Velocity Pressure'

    # np.savetxt(outname + '_simstate_%3.3f_.txt' % (self.t), data,
    np.savetxt(outname + '_simstate.txt', data, header=header, fmt='%1.4e')
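The column_stack-then-savetxt pattern above is the usual way to dump aligned 1-D profiles as text columns. A standalone sketch (the file name here is made up):

import numpy as np

x = np.linspace(0.0, 1.0, 5)
rho = np.ones_like(x)
np.savetxt('profile.txt', np.column_stack((x, rho)),
           header='x Density', fmt='%1.4e')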
Example 4: combineTechnicalIndicators

def combineTechnicalIndicators(ticker):
    dates, prices = getDateAndPrice(ticker)
    np_dates = np.chararray(len(dates), itemsize=len(dates[0]))
    for day in range(len(dates)):
        np_dates[day] = dates[day]

    percentChange = calcDailyPercentChange(prices)
    vol = calc30DayVol(percentChange)
    RSI = calcRSI(prices)

    if ticker == PREDICTED:
        np_prices = np.array(prices)
        label = np.zeros_like(np_prices)
        # create label for price of SPY
        for x in range(len(np_prices[:-lagTime])):
            print(x)
            if np_prices[x] < np_prices[x + lagTime]:
                label[x] = 1
            else:
                label[x] = 0
        features = np.column_stack((np_dates, percentChange, vol, RSI, label))
        headers = ['date', 'return_' + ticker, 'vol_' + ticker, 'RSI_' + ticker, 'label']
    else:
        features = np.column_stack((np_dates, percentChange, vol, RSI))
        headers = ['date', 'return_' + ticker, 'vol_' + ticker, 'RSI_' + ticker]

    df_features = pd.DataFrame(features, columns=headers)
    print(df_features[25:35])
    return df_features
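One caveat this example runs into: when any input to column_stack is a string array, NumPy promotes every column to strings, so the numeric indicators arrive in the DataFrame as text. A small illustration:

import numpy as np

dates = np.array(['2020-01-01', '2020-01-02'])
vals = np.array([0.1, -0.2])
print(np.column_stack((dates, vals)).dtype.kind)  # 'U' -- everything became strings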
Example 5: _recalc

def _recalc(self):
    self.clear()
    assert len(self.artists) == 0
    if self.layout is None:
        return

    # layout[0] is [x0, x0, x[parent0], nan, ...]
    # layout[1] is [y0, y[parent0], y[parent0], nan, ...]
    ids = 3 * np.arange(self.layer.data.size)

    try:
        if isinstance(self.layer, Subset):
            ids = ids[self.layer.to_mask()]

        x, y = self.layout
        blank = np.zeros(ids.size) * np.nan
        x = np.column_stack([x[ids], x[ids + 1],
                             x[ids + 2], blank]).ravel()
        y = np.column_stack([y[ids], y[ids + 1],
                             y[ids + 2], blank]).ravel()
    except IncompatibleAttribute as exc:
        self.disable_invalid_attributes(*exc.args)
        return False

    self.artists = self._axes.plot(x, y, '--')
    return True
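The column_stack(...).ravel() idiom is what lets a single plot call draw many disconnected segments: each row holds one segment's vertices plus a NaN, and raveling interleaves them so matplotlib lifts the pen at every NaN. The same trick condensed:

import numpy as np

x0 = np.array([0.0, 2.0])              # segment starts
x1 = np.array([1.0, 3.0])              # segment ends
nan = np.full(x0.size, np.nan)
print(np.column_stack([x0, x1, nan]).ravel())
# [ 0.  1. nan  2.  3. nan]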
Example 6: main

def main():
    t0 = time.time()  # start time

    # output files path
    TRAINX_OUTPUT = "../../New_Features/train_x_processed.csv"
    TEST_X_OUTPUT = "../../New_Features/test__x_processed.csv"
    # input files path
    TRAIN_FILE_X1 = "../../ML_final_project/sample_train_x.csv"
    TRAIN_FILE_X2 = "../../ML_final_project/log_train.csv"
    TEST__FILE_X1 = "../../ML_final_project/sample_test_x.csv"
    TEST__FILE_X2 = "../../ML_final_project/log_test.csv"

    # load files
    TRAIN_DATA_X1 = np.loadtxt(TRAIN_FILE_X1, delimiter=',', skiprows=1, usecols=(range(1, 18)))
    TEST__DATA_X1 = np.loadtxt(TEST__FILE_X1, delimiter=',', skiprows=1, usecols=(range(1, 18)))
    TRAIN_DATA_X2 = logFileTimeCount(np.loadtxt(TRAIN_FILE_X2, delimiter=',', skiprows=1, dtype=object))
    TEST__DATA_X2 = logFileTimeCount(np.loadtxt(TEST__FILE_X2, delimiter=',', skiprows=1, dtype=object))

    # combine files
    TRAIN_DATA_X0 = np.column_stack((TRAIN_DATA_X1, TRAIN_DATA_X2))
    TEST__DATA_X0 = np.column_stack((TEST__DATA_X1, TEST__DATA_X2))

    # data preprocessing
    scaler = StandardScaler()
    TRAIN_DATA_X = scaler.fit_transform(TRAIN_DATA_X0)
    TEST__DATA_X = scaler.transform(TEST__DATA_X0)

    # output processed files
    outputXFile(TRAINX_OUTPUT, TRAIN_DATA_X)
    outputXFile(TEST_X_OUTPUT, TEST__DATA_X)

    t1 = time.time()  # end time
    print("...This task costs " + str(t1 - t0) + " second.")
Example 7: wide_dataset_large

def wide_dataset_large():
    print("Reading in Arcene training data for binomial modeling.")
    trainDataResponse = np.genfromtxt(tests.locate("smalldata/arcene/arcene_train_labels.labels"), delimiter=' ')
    trainDataResponse = np.where(trainDataResponse == -1, 0, 1)
    trainDataFeatures = np.genfromtxt(tests.locate("smalldata/arcene/arcene_train.data"), delimiter=' ')
    trainData = h2o.H2OFrame(np.column_stack((trainDataResponse, trainDataFeatures)).tolist())

    print("Run model on 3250 columns of Arcene with strong rules off.")
    model = h2o.glm(x=trainData[1:3250], y=trainData[0].asfactor(), family="binomial", lambda_search=False, alpha=[1])

    print("Test model on validation set.")
    validDataResponse = np.genfromtxt(tests.locate("smalldata/arcene/arcene_valid_labels.labels"), delimiter=' ')
    validDataResponse = np.where(validDataResponse == -1, 0, 1)
    validDataFeatures = np.genfromtxt(tests.locate("smalldata/arcene/arcene_valid.data"), delimiter=' ')
    validData = h2o.H2OFrame(np.column_stack((validDataResponse, validDataFeatures)).tolist())
    prediction = model.predict(validData)

    print("Check performance of predictions.")
    performance = model.model_performance(validData)

    print("Check that prediction AUC is better than guessing (0.5).")
    assert performance.auc() > 0.5, "predictions should be better than pure chance"
Example 8: get_peaks

def get_peaks(data, threshold, gap_threshold):
    # apply threshold; np.where yields the indices above/below it
    abovethr = np.where(data >= threshold)[0]
    belowthr = np.where(data < threshold)[0]

    #### extract peaks
    # first, find gaps in the "above"/"below" labels (differences bigger than 1)
    b1 = np.where(np.diff(abovethr) > 1)[0]
    b2 = np.where(np.diff(belowthr) > 1)[0]

    # second, concatenate peak start and stop indices
    # note the +1 which fixes the diff-offset
    if belowthr[b2][0] > abovethr[b1][0]:
        b1 = b1[1:]
    if len(belowthr[b2]) == len(abovethr[b1]):
        indices = np.column_stack((belowthr[b2], abovethr[b1])) + 1
    else:
        indices = np.column_stack((belowthr[b2],
                                   np.concatenate((abovethr[b1], [abovethr[-1]])))) + 1

    # third, merge peaks if they are very close to each other
    indices_gaps = indices.flatten()[1:-1].reshape((-1, 2))
    gaps_to_preserve = np.where(np.diff(indices_gaps).flatten() > gap_threshold)[0]
    indices_filtered = np.concatenate(([indices[0, 0]],
                                       indices_gaps[gaps_to_preserve].flatten(),
                                       [indices[-1, 1]])).reshape((-1, 2))
    return indices_filtered
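Here column_stack does the pairing: one column of peak-start indices, one of peak-stop indices, giving an (n_peaks, 2) array. In isolation:

import numpy as np

starts = np.array([3, 10])
stops = np.array([5, 14])
print(np.column_stack((starts, stops)))
# [[ 3  5]
#  [10 14]]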
Example 9: write_parameters_outputvalues

def write_parameters_outputvalues(self, P):
    Mstar, SFR_opt, _ = model.stellar_info_array(self.chain.flatchain_sorted, self.data, self.out['realizations2int'])
    # itemsize 4 so "P025"/"P975" are not truncated (the original '|S3' clipped them)
    column_names = np.transpose(np.array(["P025", "P16", "P50", "P84", "P975"], dtype='|S4'))
    chain_pars = np.column_stack((self.chain.flatchain_sorted, Mstar, SFR_opt))

    if self.out['calc_intlum']:
        # check that ['intlum_names'][0] is always L_IR(8-100)
        SFR_IR = model.sfr_IR(self.int_lums[0])
        chain_others = np.column_stack((self.int_lums.T, SFR_IR))
        # list(...) around map keeps this Python 3 compatible
        outputvalues = np.column_stack((
            np.transpose(list(map(lambda v: (v[0], v[1], v[2], v[3], v[4]),
                                  zip(*np.percentile(chain_pars, [2.5, 16, 50, 84, 97.5], axis=0))))),
            np.transpose(list(map(lambda v: (v[0], v[1], v[2], v[3], v[4]),
                                  zip(*np.percentile(chain_others, [2.5, 16, 50, 84, 97.5], axis=0)))))
        ))
        outputvalues_header = ' '.join([i for i in np.hstack((P.names, 'Mstar', 'SFR_opt', self.out['intlum_names'], 'SFR_IR'))])
    else:
        outputvalues = np.column_stack((list(map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                                                 zip(*np.percentile(chain_pars, [16, 50, 84], axis=0))))))
        outputvalues_header = ' '.join([i for i in P.names])
    return outputvalues, outputvalues_header
Example 10: residuals

def residuals(self, src, dst):
    """Compute the Sampson distance.

    The Sampson distance is the first approximation to the geometric error.

    Parameters
    ----------
    src : (N, 2) array
        Source coordinates.
    dst : (N, 2) array
        Destination coordinates.

    Returns
    -------
    residuals : (N, ) array
        Sampson distance.
    """
    src_homogeneous = np.column_stack([src, np.ones(src.shape[0])])
    dst_homogeneous = np.column_stack([dst, np.ones(dst.shape[0])])

    F_src = self.params @ src_homogeneous.T
    Ft_dst = self.params.T @ dst_homogeneous.T

    dst_F_src = np.sum(dst_homogeneous * F_src.T, axis=1)

    return np.abs(dst_F_src) / np.sqrt(F_src[0] ** 2 + F_src[1] ** 2
                                       + Ft_dst[0] ** 2 + Ft_dst[1] ** 2)
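Appending a column of ones with column_stack is the standard way to lift (N, 2) points to homogeneous coordinates before applying a 3x3 matrix such as the fundamental matrix here:

import numpy as np

src = np.array([[1.0, 2.0],
                [3.0, 4.0]])
print(np.column_stack([src, np.ones(src.shape[0])]))
# [[1. 2. 1.]
#  [3. 4. 1.]]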
Example 11: process_recarray

def process_recarray(data, endog_idx=0, exog_idx=None, stack=True, dtype=None):
    names = list(data.dtype.names)

    if isinstance(endog_idx, int):  # was (int, long) under Python 2
        endog = array(data[names[endog_idx]], dtype=dtype)
        endog_name = names[endog_idx]
        endog_idx = [endog_idx]
    else:
        endog_name = [names[i] for i in endog_idx]
        if stack:
            # pass a list, not a generator: newer NumPy rejects generators here
            endog = np.column_stack([data[field] for field in endog_name])
        else:
            endog = data[endog_name]

    if exog_idx is None:
        exog_name = [names[i] for i in range(len(names))
                     if i not in endog_idx]
    else:
        exog_name = [names[i] for i in exog_idx]

    if stack:
        exog = np.column_stack([data[field] for field in exog_name])
    else:
        exog = recarray_select(data, exog_name)

    if dtype:
        endog = endog.astype(dtype)
        exog = exog.astype(dtype)

    dataset = Dataset(data=data, names=names, endog=endog, exog=exog,
                      endog_name=endog_name, exog_name=exog_name)
    return dataset
Example 12: create_colored_3d_points_from_matrices

def create_colored_3d_points_from_matrices(matrices, index_list):
    points3d_l = []
    colors_ll = []
    mat_l = []
    X_MULTIPLIER = 1 / 15.

    for i, mat in enumerate(matrices):
        X, Y = np.meshgrid(range(mat.shape[0]), range(mat.shape[1]))
        x_size = mat.shape[0] * X_MULTIPLIER
        X = np.matrix(X * X_MULTIPLIER) + x_size * i + (i * x_size / 3.)
        #Y = (np.matrix(np.ones((mat.shape[0], 1))) * times_m).T
        Y = (np.matrix(np.ones((mat.shape[0], 1))) * index_list[i]).T
        Z = np.matrix(np.zeros(mat.shape)).T

        points = np.row_stack((X.reshape(1, X.shape[0] * X.shape[1]),
                               Y.reshape(1, Y.shape[0] * Y.shape[1]),
                               Z.reshape(1, Z.shape[0] * Z.shape[1])))
        colors = np.matrix(np.zeros((4, mat.shape[0] * mat.shape[1])))

        mat_l.append(mat.T.reshape((1, mat.shape[1] * mat.shape[0])))
        points3d_l.append(points)
        colors_ll.append(colors)

    all_mats = np.column_stack(mat_l)
    all_points = np.column_stack(points3d_l)
    all_colors = np.column_stack(colors_ll)
    return all_mats, all_points, all_colors
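Note that the inputs here are already 2-D; for 2-D arrays np.column_stack simply concatenates along the second axis (like np.hstack), which is what joins the per-matrix point blocks side by side:

import numpy as np

a = np.zeros((3, 2))
b = np.zeros((3, 4))
print(np.column_stack([a, b]).shape)  # (3, 6)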
Example 13: listener_func

def listener_func(msg):
    amat = vectorize_func(msg)
    t = np.matrix([msg.header.stamp.to_time()])
    got_lock = False

    if self.channels[topic][0] is None:
        self.channels[topic] = [amat, t, threading.RLock()]
    else:
        lock = self.channels[topic][2]
        lock.acquire()
        got_lock = True
        # append the new sample and its timestamp as extra columns
        new_record = [np.column_stack((self.channels[topic][0], amat)),
                      np.column_stack((self.channels[topic][1], t)),
                      lock]
        self.channels[topic] = new_record

    lock = self.channels[topic][2]
    if not got_lock:
        lock.acquire()

    # keep only messages from the last buffer_length_secs seconds
    n_seconds_ago = t[0, 0] - buffer_length_secs
    records_in_range = (np.where(self.channels[topic][1] >= n_seconds_ago)[1]).A1
    self.channels[topic][0] = self.channels[topic][0][:, records_in_range]
    self.channels[topic][1] = self.channels[topic][1][:, records_in_range]
    lock.release()
Example 14: main

def main():  # clustering and write output
    if len(pep_array) > 1:
        matrix = []
        for i in range(0, len(pep_array)):
            matrix.append(pep_array[i][4].replace('\"', "").split(','))
        dataMatrix = numpy.array(matrix, dtype=float)

        d = sch.distance.pdist(dataMatrix, metric)  # vector of pairwise distances
        if metric == "correlation":
            # with correlation, all values in the distance matrix should lie in [0, 2]
            D = numpy.clip(d, 0, 2)
        else:
            D = d

        try:
            cutoff = float(t)
        except ValueError:
            print("please provide a numeric value for --t")
            sys.exit()

        L = sch.linkage(D, method, metric)
        ind = sch.fcluster(L, cutoff, 'distance')  # distance is dissimilarity (1 - correlation)
        p = numpy.array(pep_array)
        p = numpy.column_stack([p, ind])
        formatoutput(p)
    else:
        p = numpy.array(pep_array)
        p = numpy.column_stack([p, [0]])
        formatoutput(p)
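column_stack is used here to append the cluster assignment as a trailing column; because pep_array holds strings, the integer labels get promoted to strings as well:

import numpy as np

p = np.array([['pep1', '0.2'], ['pep2', '0.9']])
ind = np.array([1, 2])
print(np.column_stack([p, ind]))
# [['pep1' '0.2' '1']
#  ['pep2' '0.9' '2']]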
Example 15: main

def main():
    LAMB = 10.0
    SPLIT = 40
    t0 = time.time()

    TRAIN19_FILE = 'hw4_train.dat'
    TRAIN19_DATA = np.loadtxt(TRAIN19_FILE, dtype=float)  # np.float was removed in NumPy 1.24
    xTrain19 = np.column_stack((np.ones(TRAIN19_DATA.shape[0]), TRAIN19_DATA[:, 0:(TRAIN19_DATA.shape[1] - 1)]))
    yTrain19 = TRAIN19_DATA[:, (TRAIN19_DATA.shape[1] - 1)]

    TEST19_FILE = 'hw4_test.dat'
    TEST19_DATA = np.loadtxt(TEST19_FILE, dtype=float)
    xTest19 = np.column_stack((np.ones(TEST19_DATA.shape[0]), TEST19_DATA[:, 0:(TEST19_DATA.shape[1] - 1)]))
    yTest19 = TEST19_DATA[:, (TEST19_DATA.shape[1] - 1)]

    lambPowList = []
    eCvList = []
    for lambPower in range(-10, 3):
        eCv = vFoldErr(xTrain19, yTrain19, math.pow(LAMB, lambPower), SPLIT)
        lambPowList.append(lambPower)
        eCvList.append(eCv)

    eCvList = np.array(eCvList)
    minIndex = np.where(eCvList == eCvList.min())
    index = minIndex[0].max()
    plotHist(lambPowList, eCvList, "log(lambda)", "Ecv", "Q19", 1, False)

    t1 = time.time()
    print('=========================================================')
    print('Question 19: log(lambda) is', lambPowList[index], 'Ecv is', eCvList[index])
    print('---------------------------------------------------------')
    print('Q19 costs', t1 - t0, 'seconds')
    print('=========================================================')
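Prepending a bias column of ones, as xTrain19 does above, is the classic column_stack move in regression setups:

import numpy as np

X = np.array([[0.5, 1.5],
              [2.0, 3.0]])
print(np.column_stack((np.ones(X.shape[0]), X)))
# [[1.  0.5 1.5]
#  [1.  2.  3. ]]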