This article collects typical usage examples of the np_array function from numpy in Python. If you have been wondering what exactly np_array does, how to use it, or where to find real examples of np_array in action, the curated code samples below may help.
The following shows 15 code examples of the np_array function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
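Note that np_array is not itself a NumPy API: in the projects sampled below it is simply numpy.array bound to a local alias at import time, usually alongside similarly aliased helpers such as np_append and np_zeros. A minimal sketch of the import convention these examples assume:

from numpy import array as np_array
from numpy import append as np_append, zeros as np_zeros

print(np_array([1, 2, 3]))   # [1 2 3]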
Example 1: __init__
def __init__(self, dbFileName, force=False, scaleFactor=1000):
# data
self.dataManager = GMDataManager() # most data is saved to hdf
self.dbFileName = dbFileName # db containing all the data we'd like to use
self.condition = "" # condition will be supplied at loading time
# --> NOTE: ALL of the arrays in this section are in sync
# --> each one holds information for an individual contig
self.indices = np_array([]) # indices into the data structure based on condition
self.covProfiles = np_array([]) # coverage based coordinates
self.transformedCP = np_array([]) # the munged data points
self.averageCoverages = np_array([]) # average coverage across all stoits
self.kmerSigs = np_array([]) # raw kmer signatures
self.kmerVals = np_array([]) # PCA'd kmer sigs
self.contigNames = np_array([])
self.contigLengths = np_array([])
self.contigColours = np_array([]) # calculated from kmerVals
self.binIds = np_array([]) # list of bin IDs
# --> end section
# meta
self.validBinIds = {} # valid bin ids -> numMembers
self.binnedRowIndicies = {} # dictionary of those indices which belong to some bin
self.restrictedRowIndicies = {} # dictionary of those indices which can not be binned yet
self.numContigs = 0 # this depends on the condition given
self.numStoits = 0 # this depends on the data which was parsed
# contig links
self.links = {}
# misc
    self.forceWriting = force # overwrite existing values silently?
    self.scaleFactor = scaleFactor # scale everything in the transformed data to this dimension
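Every array in the synced section starts out empty and is replaced wholesale once data is loaded under the given condition. What such a placeholder looks like before loading (plain NumPy, no GroopM machinery):

from numpy import array as np_array

indices = np_array([])
print(indices.shape, indices.dtype)   # (0,) float64, filled in at load time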
Example 2: plot_energy
def plot_energy(S, filename):
cen = np_array(S**2).cumsum() / np_array(S**2).sum() * 100
DPI = 100
fig = plt.figure()
# Energy subplot
ax1 = fig.add_subplot(2, 1, 1)
line = ax1.plot(S**2, "o-", linewidth=1)
ax1.set_yscale("log")
plt.title("Basis vector vs Energy")
plt.xlabel("Basis vector number")
plt.ylabel("Energy")
plt.axis([0, None, None, None])
plt.grid(True)
# Cumulative energy subplot
ax2 = fig.add_subplot(2, 1, 2)
line = ax2.plot(cen, "o-", linewidth=1)
ax2.axis([0, None, 90, 101])
plt.title("Cumulative Energy")
plt.xlabel("Basis vector number")
plt.ylabel("Cumulative Energy")
plt.grid(True)
plt.tight_layout()
plt.savefig('{}.png'.format(filename), bbox_inches='tight', dpi=DPI)
return
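plot_energy is typically fed the singular values from an SVD, so the top panel shows how much energy each basis vector carries and the bottom panel how quickly it accumulates. A hypothetical driver, assuming plt (matplotlib.pyplot) and np_array are in scope as above; the random matrix and output name are made up for illustration:

import numpy as np
from numpy.linalg import svd

A = np.random.rand(50, 20)
_, S, _ = svd(A, full_matrices=False)
plot_energy(S, 'energy_plot')   # writes energy_plot.png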
Example 3: compare_csv_decimal_files
def compare_csv_decimal_files(file1, file2, header=True, timeseries=False):
"""
This function compares two csv files
"""
#CHECK NUM LINES
with open_csv(file1) as fh1, \
open_csv(file2) as fh2:
assert sum(1 for line1 in fh1) == sum(1 for line2 in fh2)
with open_csv(file1) as fh1, \
open_csv(file2) as fh2:
csv1 = csv.reader(fh1)
csv2 = csv.reader(fh2)
if header:
assert next(csv1) == next(csv2) #header
while True:
try:
row1 = next(csv1)
row2 = next(csv2)
compare_start_index = 0
if timeseries:
assert row1[0] == row2[0] #check dates
compare_start_index=1
assert_almost_equal(np_array(row1[compare_start_index:], dtype=np_float32),
np_array(row2[compare_start_index:], dtype=np_float32),
decimal=2)
            except StopIteration:
                break
    return True
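Here open_csv is the project's own file-opening helper, and assert_almost_equal comes from numpy.testing; the decimal=2 argument is what makes the comparison tolerant of rounding. A hypothetical call (the file names are made up):

compare_csv_decimal_files('run_a/results.csv', 'run_b/results.csv',
                          header=True, timeseries=True)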
Example 4: real_imaginary_freq_domain
def real_imaginary_freq_domain(samples):
"""
Apply fft on the samples and return the real and imaginary
parts in separate
"""
freq_domain = fft(samples)
freq_domain_real = np_array([abs(x.real) for x in freq_domain])
freq_domain_imag = np_array([abs(x.imag) for x in freq_domain])
return freq_domain_real, freq_domain_imag
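A quick check with a pure tone, assuming fft comes from numpy.fft (the original project may import it from scipy instead) and np_array is numpy.array:

import numpy as np
from numpy.fft import fft
from numpy import array as np_array

t = np.linspace(0, 1, 256, endpoint=False)
re, im = real_imaginary_freq_domain(np.sin(2 * np.pi * 8 * t))
print(im.argmax())   # 8: the imaginary part peaks at the tone's frequency bin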
Example 5: __init_statistics
def __init_statistics(self):
stats = self.raw_stats
if stats is not None:
combined = np_array([[int(team), stats['oprs'][team], stats['dprs'][team],
stats['ccwms'][team]] for team in stats['oprs'].keys()], np_object)
else:
teams = self.get_team()[:, 0]
num_teams = len(teams)
combined = np_rot90(
np_array([teams, np_zeros(num_teams), np_zeros(num_teams), np_zeros(num_teams)], np_object))[::-1]
self.stats = combined
Example 6: centroidfinder
def centroidfinder(cvimage, color, threshold):
lo = [ c - t for c, t in zip(color, threshold) ]
hi = [ c + t for c, t in zip(color, threshold) ]
mat = cv.CreateMat(cvimage.height, cvimage.width, cv.CV_8U)
cv.InRangeS(cvimage, lo, hi, mat)
data = [ [x, y] for x, y in product(range(mat.height), range(mat.width))
if int(mat[x, y]) ]
np_data = np_array(data)
np_centroids = np_array( [ [0, 0], [0, mat.width],
[mat.height, 0], [mat.height, mat.width] ])
centroids, labels = kmeans2(np_data, np_centroids)
return [ (x, y) for x, y in centroids.tolist() ]
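The notable trick is seeding kmeans2 (presumably scipy.cluster.vq.kmeans2) with the four image corners, so each cluster converges onto the marker nearest that corner. The same idea in self-contained form, without the legacy cv API (the synthetic point clouds are made up):

import numpy as np
from scipy.cluster.vq import kmeans2

h, w = 480, 640
corners = [(0, 0), (0, w), (h, 0), (h, w)]
points = np.vstack([np.random.randn(50, 2) * 5 + c for c in corners])
seeds = np.array(corners, dtype=float)
centroids, labels = kmeans2(points, seeds, minit='matrix')
print(centroids)   # one centroid settles near each corner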
Example 7: _interp_line
def _interp_line(self, line):
    (x, y) = self._handle2points(line)
xi = range(int(x[0]),int(x[-1])+1)
yi = np_interp(np_array(xi), np_array(x), np_array(y))
return (xi,yi)
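The heavy lifting is numpy.interp, which fills in one y value per integer x between the handles. The interpolation step in isolation (the handle coordinates are made up):

import numpy as np

x = [0.0, 4.0, 10.0]
y = [0.0, 8.0, 5.0]
xi = range(int(x[0]), int(x[-1]) + 1)
yi = np.interp(np.array(list(xi)), np.array(x), np.array(y))
print(yi)   # piecewise-linear samples at x = 0..10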
Example 8: updateAlgoData
def updateAlgoData():
"""
Update from raw data into FPs directly used by location.fixPosWLAN() from WppDB(wpp_clusterid, wpp_cfps).
1) Retrieve latest incremental rawdata(csv) from remote FTP server(hosted by FPP).
2) Decompress bzip2, import CSV into wpp_uprecsinfo with its ver_uprecs, Update ver_uprecs in wpp_uprecsver.
3) Incr clustering inserted rawdata for direct algo use.
"""
dbips = DB_OFFLINE
for dbip in dbips:
dbsvr = dbsvrs[dbip]
wppdb = WppDB(dsn=dbsvr['dsn'], dbtype=dbsvr['dbtype'])
ver_wpp = wppdb.getRawdataVersion()
# Sync rawdata into wpp_uprecsinfo from remote FTP server.
print 'Probing rawdata version > [%s]' % ver_wpp
vers_fpp,localbzs = syncFtpUprecs(FTPCFG, ver_wpp)
if not vers_fpp: print 'Not found!'; continue
else: print 'Found new vers: %s' % vers_fpp
# Handle each bzip2 file.
alerts = {'vers':[], 'details':''}
tab_rd = 'wpp_uprecsinfo'
for bzfile in localbzs:
# Filter out the ver_uprecs info from the name of each bzip file.
ver_bzfile = bzfile.split('_')[-1].split('.')[0]
# Update ver_uprecs in wpp_uprecsver to ver_bzfile.
wppdb.setRawdataVersion(ver_bzfile)
print '%s\nUpdate ver_uprecs -> [%s]' % ('-'*40, ver_bzfile)
# Decompress bzip2.
sys.stdout.write('Decompress & append rawdata ... ')
csvdat = csv.reader( BZ2File(bzfile) )
try:
indat = np_array([ line for line in csvdat ])
except csv.Error, e:
sys.exit('\n\nERROR: %s, line %d: %s!\n' % (bzfile, csvdat.line_num, e))
# Append ver_uprecs(auto-incr),area_ok(0),area_try(0) to raw 16-col fp.
append_info = np_array([ [ver_bzfile,0,0] for i in xrange(len(indat)) ])
indat_withvers = np_append(indat, append_info, axis=1).tolist(); print 'Done'
# Import csv into wpp_uprecsinfo.
try:
sys.stdout.write('Import rawdata: ')
wppdb.insertMany(table_name=tab_rd, indat=indat_withvers, verb=True)
except Exception, e:
_lineno = sys._getframe().f_lineno
_file = sys._getframe().f_code.co_filename
alerts['details'] += '\n[ver:%s][%s:%s]: %s' % \
(ver_bzfile, _file, _lineno, str(e).replace('\n', ' '))
alerts['vers'].append(ver_bzfile)
print 'ERROR: Insert Rawdata Failed!'
continue
Example 9: __init_matches
def __init_matches(self):
for match_type, var in [['qm', 'qualification_matches'], ['qf', 'quarter_final_matches'],
['sf', 'semi_final_matches'], ['f', 'final_matches']]:
num_matches = self.__count_matches(self.raw_matches, match_type)
        if num_matches != 0:
red_teams = np_zeros((num_matches,), np_object)
blue_teams = np_zeros((num_matches,), np_object)
blue_scores = np_zeros((num_matches,), np_object)
red_scores = np_zeros((num_matches,), np_object)
match_code = np_zeros((num_matches,), np_object)
match_numbers = np_arange(1, num_matches + 1, 1)
for match in self.raw_matches:
if match['comp_level'] == match_type:
match_num = match['match_number'] - 1
red_teams[match_num] = [np_int(match['alliances']['red']['teams'][0][3:]),
np_int(match['alliances']['red']['teams'][1][3:]),
np_int(match['alliances']['red']['teams'][2][3:])]
red_scores[match_num] = [-1 if match['alliances']['red']['score'] is None
else match['alliances']['red']['score'],
-1 if match['score_breakdown']['red']['auto'] is None
else match['score_breakdown']['red']['auto'],
-1 if match['score_breakdown']['red']['foul'] is None
else match['score_breakdown']['red']['foul']]
blue_teams[match_num] = [np_int(match['alliances']['blue']['teams'][0][3:]),
np_int(match['alliances']['blue']['teams'][1][3:]),
np_int(match['alliances']['blue']['teams'][2][3:])]
blue_scores[match_num] = [-1 if match['alliances']['blue']['score'] is None
else match['alliances']['blue']['score'],
-1 if match['score_breakdown']['blue']['auto'] is None
else match['score_breakdown']['blue']['auto'],
-1 if match['score_breakdown']['blue']['foul'] is None
else match['score_breakdown']['blue']['foul']]
match_code[match_num] = match['key']
red_win = np_array(red_scores.tolist())[:, 0] > np_array(blue_scores.tolist())[:, 0]
winner = np_array(['blue'] * len(red_win))
winner[red_win] = 'red'
self.__setattr__(var,
np_rot90(np_array([[match_type] * num_matches, match_numbers, red_teams, blue_teams,
red_scores, blue_scores, winner, match_code], np_object))[::-1])
Example 10: get_zone_count_estimates
def get_zone_count_estimates(location_id, door_count_placement_view_pair, start_date, end_date, adjusted=False):
"""Iterates through .csv files to return a list of (datetime, zone_count)
ARGS
location_id: location_id of installation, eg '55'
door_count_placement_view_pair: placement and view id pair, e.g. ('3333230','0')
start_date: in format YYYY-MM-DD, <datetime>
end_date: in format YYYY-MM-DD. range is exclusive '<'. <datetime>
    adjusted: to select between raw data or adjusted <bool>. if adjusted is chosen but not available, returns raw.
RETURN
array with (datetime, zone_count) tuples
"""
datetime_zone_count_pairs = []
day = timedelta(days = 1)
curr_day = start_date
while curr_day < end_date:
date_str = date2str(curr_day, "%Y-%m-%d")
fullpath = ANALYSIS_FOLDER_GLOBAL+str(location_id)+'/'+gtfilename(location_id,door_count_placement_view_pair,curr_day)
if DEBUG:
print 'get_zone_count_estimates: reading file:', fullpath
data = read_csv(fullpath)
for idx in range(len(data)):
ts = utc.localize(get_datetime_from_csv_row(data[idx]), is_dst=None).astimezone(utc)
if ts >= start_date and ts < end_date:
datetime_zone_count_pairs.append(get_zone_count(data[idx], adjusted))
curr_day += day
datetime_zone_count_pairs = np_array(datetime_zone_count_pairs)
return datetime_zone_count_pairs
Example 11: iterboxed
def iterboxed(self, rows):
"""Iterator that yields each scanline in boxed row flat pixel
format. `rows` should be an iterator that yields the bytes of
each row in turn.
"""
    def asvalues(raw):
        """Convert a row of raw bytes into a flat row. Result will
        be a freshly allocated object, not shared with
        argument.
        """
        if self.bitdepth == 8:
            return np_array(raw, 'uint8')
        if self.bitdepth == 16:
            raw = tostring(raw)
            return np_array(struct.unpack('!%dH' % (len(raw)//2), raw),
                            'uint%d' % self.bitdepth)
        # Sub-byte depths: several samples are packed into each byte.
        assert self.bitdepth < 8
        width = self.width
        # Samples per byte
        spb = 8//self.bitdepth
        out = array('B')
        mask = 2**self.bitdepth - 1
        shifts = [self.bitdepth * i
                  for i in reversed(list(range(spb)))]
        for o in raw:
            out.extend([mask&(o>>i) for i in shifts])
        return out[:width]
    return np_array(map(asvalues, rows))
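The sub-byte branch unpacks several samples from each byte using a mask and a list of shifts. The trick in isolation, for a 2-bit depth (the example byte is made up):

bitdepth = 2
spb = 8 // bitdepth                  # 4 samples per byte
mask = 2 ** bitdepth - 1             # 0b11
shifts = [bitdepth * i for i in reversed(range(spb))]
byte = 0b11100100                    # packs the samples 3, 2, 1, 0
print([(byte >> s) & mask for s in shifts])   # [3, 2, 1, 0]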
Example 12: amplitude_regularization
def amplitude_regularization(signal, bits=16, factor=0.7):
"""
ARGS:
signal: signal amplitudes, should be in the range [-1.0, 1.0], numpy array of numbers
bits: bit-depth value, <int>
factor: 0.7 by default, as suggested by Gerald Friedland @ ICSI
RETURN:
regularized: amplitude regularized signal, <number> or numpy array of numbers
"""
if isinstance(signal, list):
signal = np_array(signal)
elif isinstance(signal, (int, long, float, complex)):
raise Exception("Invalid arg")
# convert amplitude [-1.0, 1.0] to N-bit samples
half_n_bits = 2**(bits-1)
signal_scaled_to_n_bits = (signal + 1) * half_n_bits
# regularize
regularized = signal_scaled_to_n_bits ** factor
# scale back to [-1.0,1.0]
regularized -= half_n_bits
regularized /= half_n_bits
return regularized
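The function itself is Python 2 code (it references long), but the transform is easy to reproduce. A minimal re-derivation of the body for the default 16-bit depth, NumPy only:

import numpy as np

signal = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 8000))
half = 2 ** 15
reg = ((signal + 1) * half) ** 0.7 / half - 1
print(signal.max(), reg.max())   # the 0.7 exponent sharply compresses the output range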
Example 13: pca
def pca(self, data_matrix):
"""Perform PCA.
Principal components are given in self.pca,
and the variance in self.variance.
Parameters
----------
data_matrix : list of lists
List of tetranucleotide signatures
"""
cols = len(data_matrix[0])
data_matrix = np_reshape(np_array(data_matrix), (len(data_matrix), cols))
pca = PCA()
pc, variance = pca.pca_matrix(data_matrix, 3, bCenter=True, bScale=False)
# ensure pc matrix has at least 3 dimensions
if pc.shape[1] == 1:
pc = np_append(pc, np_zeros((pc.shape[0], 2)), 1)
variance = np_append(variance[0], np_ones(2))
elif pc.shape[1] == 2:
pc = np_append(pc, np_zeros((pc.shape[0], 1)), 1)
variance = np_append(variance[0:2], np_ones(1))
return pc, variance
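PCA here is an external helper class (not shown); the np_append calls at the end merely guarantee three columns so downstream 3-D plotting never breaks. The padding step in isolation:

import numpy as np

pc = np.random.rand(10, 2)                         # only two components came back
pc = np.append(pc, np.zeros((pc.shape[0], 1)), 1)  # pad a zero third axis
print(pc.shape)                                    # (10, 3)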
Example 14: sim
def sim(self, src, tar):
"""Return the Steffensen similarity of two strings.
Parameters
----------
src : str
Source string (or QGrams/Counter objects) for comparison
tar : str
Target string (or QGrams/Counter objects) for comparison
Returns
-------
float
Steffensen similarity
Examples
--------
>>> cmp = Steffensen()
>>> cmp.sim('cat', 'hat')
0.24744247205786737
>>> cmp.sim('Niall', 'Neil')
0.1300991207720166
>>> cmp.sim('aluminum', 'Catalan')
0.011710186806836031
>>> cmp.sim('ATCG', 'TAGC')
4.1196952743871653e-05
.. versionadded:: 0.4.0
"""
if src == tar:
return 1.0
if not src or not tar:
return 0.0
self._tokenize(src, tar)
a = self._intersection_card()
b = self._src_only_card()
c = self._tar_only_card()
d = self._total_complement_card()
n = a + b + c + d
p = np_array([[a, b], [c, d]]) / n
psisq = 0.0
for i in range(len(p)):
pi_star = p[i, :].sum()
for j in range(len(p[i])):
pj_star = p[:, j].sum()
num = p[i, j] * (p[i, j] - pi_star * pj_star) ** 2
if num:
psisq += num / (
pi_star * (1 - pi_star) * pj_star * (1 - pj_star)
)
return psisq
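Under the hood this is Steffensen's psi-squared over the 2x2 contingency table of token counts a, b, c, d. The same computation, vectorized (the table values are made up; this assumes no marginal is 0 or 1, the case the loop's num check guards against):

import numpy as np

p = np.array([[2.0, 3.0], [4.0, 7.0]]) / 16.0   # [[a, b], [c, d]] / n
row = p.sum(axis=1, keepdims=True)               # pi_star
col = p.sum(axis=0, keepdims=True)               # pj_star
num = p * (p - row * col) ** 2
den = row * (1 - row) * col * (1 - col)
print((num / den).sum())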
Example 15: hz2mel
def hz2mel(f):
""" Convert a number or numpy numerical array of frequencies in Hz into mel
ARGS: Frequency or array of frequencies, <number> or numpy array of numbers
RETURN: Mel frequency(ies), <number> or numpy array of numbers
"""
if isinstance(f, list):
f = np_array(f)
    return 1127.01048 * log(f/700.0 + 1)
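A quick sanity check, assuming log is numpy.log (which the array branch requires) and np_array is numpy.array:

from numpy import array as np_array, log

print(hz2mel(440.0))             # ~549.6 mel
print(hz2mel([440.0, 880.0]))    # elementwise over the converted array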