This article collects typical usage examples of the numpy.chararray function in Python. If you are wondering how the chararray function works, how to call it, or what it looks like in real code, the curated examples below should help.
The following shows 15 code examples of the chararray function, sorted by popularity by default.
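Before diving into the examples, here is a minimal sketch of the basic construction and element-wise string methods (note that the NumPy documentation keeps chararray mainly for backward compatibility and recommends plain arrays of dtype 'S'/'U' plus the numpy.char functions for new code):

import numpy as np

# A small 1-D chararray of fixed-width byte strings; itemsize is the maximum length.
labels = np.chararray((3,), itemsize=5)
labels[:] = 'NONE'        # broadcast a fill value to every element
labels[0] = 'cat'         # longer values would be truncated to itemsize bytes
print(labels)             # chararray([b'cat', b'NONE', b'NONE'], dtype='|S5')

# String methods are available directly on the array and applied element-wise.
print(labels.lower())     # chararray([b'cat', b'none', b'none'], dtype='|S5')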
Example 1: partition
def partition(data_set, target_set, training_ratio):
    # Number of targets (3 targets here, either 'a', 'b', or 'c')
    n_targets = len(np.unique(target_set))
    print "Number of target values: %d" % n_targets
    # Number of samples taken per target (23 in this example)
    n_samples = len(data_set) / n_targets
    print "Number of samples per target: %d" % n_samples
    # Size of training set
    training_samples = int(round(n_samples * training_ratio))
    print "Size of training set: %d" % training_samples
    # Size of test set
    test_samples = int(round(n_samples - training_samples))
    print "Size of test set: %d" % test_samples
    # Array to hold the targets for the training set
    train_target = np.chararray(int(n_targets * training_samples))
    # Array to hold the targets for the testing set
    test_target = np.chararray(int(n_targets * test_samples))
    # Matrix to hold the training data
    train_data = np.empty([len(train_target), len(data_set[1])])
    # Matrix to hold the test data
    test_data = np.empty([len(test_target), len(data_set[1])])
    # Initialize values for each array/matrix to its corresponding value
    for target in np.arange(n_targets):
        for i in np.arange(n_samples):
            if i <= training_samples - 1:
                train_target[i + (target * training_samples - 1)] = target_set[target * n_samples]
                train_data[i + (target * training_samples - 1)] = data_set[i + (target * n_samples - 1)]
            else:
                test_target[(target * test_samples) + n_samples - i - 1] = target_set[target * n_samples]
                test_data[(target * test_samples) + n_samples - i - 1] = data_set[i + (target * n_samples - 1)]
    return train_data, train_target, test_data, test_target
Example 2: load_data
def load_data(path):
    """
    load data from tiny-imagenet
    note that in the validation set, label information is in val_annotations.txt
    """
    train_size = 100000
    val_size = 10000
    test_size = 10000
    # for training data set
    X_train = np.zeros((train_size, 3, 64, 64), dtype="uint8")
    # y_train = np.zeros((train_size,), dtype="str")
    y_train = np.chararray((train_size,), itemsize=10)
    # for validation data set
    X_val = np.zeros((val_size, 3, 64, 64), dtype="uint8")
    # y_val = np.zeros((val_size,), dtype="str")
    y_val = np.chararray((val_size,), itemsize=10)
    # path_train = os.path.join(path, 'train')
    # path_val = os.path.join(path, 'val')
    print "load training data..."
    for idx, (label, img) in enumerate(read_files(path, 'train')):
        # reshape (64, 64, 3) -> (3, 64, 64)
        # some images are grey-scale, e.g. n04366367_182.JPEG
        # Grey-scale means that all values have the same intensity. Set all
        # channels (in RGB) equal to the grey value and you will have an RGB
        # black and white image.
        if img.ndim == 2:
            img = np.array([img[:, :], img[:, :], img[:, :]])
        elif img.ndim == 3:
            img = np.array([img[:, :, 0], img[:, :, 1], img[:, :, 2]])
        X_train[idx, :, :, :] = img
        y_train[idx] = label
    # change text labels (n04366367, ...) to (0, 1, 2, ...)
    print "encoding labels for training data..."
    le = LabelEncoder()
    y_train = le.fit_transform(y_train)
    print "load validation data..."
    for idx, (label, img) in enumerate(read_files(path, 'val')):
        # same grey-scale handling as for the training images
        if img.ndim == 2:
            img = np.array([img[:, :], img[:, :], img[:, :]])
        elif img.ndim == 3:
            img = np.array([img[:, :, 0], img[:, :, 1], img[:, :, 2]])
        X_val[idx, :, :, :] = img
        y_val[idx] = label
    # change text labels (n04366367, ...) to (0, 1, 2, ...)
    print "encoding labels for validation data..."
    y_val = le.transform(y_val.tolist())
    return le, (X_train, y_train), (X_val, y_val)
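The label-encoding step used above can be tried on its own; a small standalone sketch, assuming scikit-learn's LabelEncoder and made-up WordNet-style label strings:

import numpy as np
from sklearn.preprocessing import LabelEncoder

# Fixed-width byte labels, like the chararray filled in load_data (labels are illustrative).
y = np.chararray((4,), itemsize=10)
y[:] = ['n01443537', 'n01629819', 'n01443537', 'n01629819']
le = LabelEncoder()
print(le.fit_transform(y))   # e.g. [0 1 0 1]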
Example 3: crea_liste
def crea_liste(month, year):
    calendario = calendar.Calendar().itermonthdays(year, month)
    day = ["Lunedi", "Martedi", "Mercoledi", "Giovedi", "Venerdi", "Sabato", "Domenica"]
    cont = 0
    tupla = []
    tupla2 = []
    mese = str(month) + "/" + str(year)
    for i in calendario:
        tupla.append(i)
        tupla2.append(day[cont % len(day)])
        cont += 1
    tupla3 = []
    tupla4 = []
    for i in xrange(0, len(tupla)):
        if tupla[i] != 0:
            tupla3.append(tupla[i])
            tupla4.append(tupla2[i])
    tupla = None
    tupla2 = None
    # Initialize the matrix
    mat = numpy.chararray((len(tupla3), 7))
    mat = numpy.chararray(mat.shape, itemsize=40)
    mat[:] = " "
    return [tupla4, mat, mese]
Example 4: most_weighted
def most_weighted(X, CV, n=10, save=False):
    """Finds the most weighted words within an array.

    Args
    ----
    X: ndarray
        Term-document array with books on each row and words
        for each column
    CV: vectorizer object
        Provides the vocabulary.
        Can either be a CountVectorizer or TfidfVectorizer.
    n: int
        The top n weighted words
    save: bool
        Whether to save out

    Returns
    -------
    top: DataFrame
        The top n weighted words per book
    """
    # You have a 2d array: rows are books, cols are words
    # You have a vocab dict with 'word': array_index
    vocab = CV.vocabulary_
    # Create a fixed-width character array the size of the vocabulary
    vocab_array = np.chararray((len(vocab),), itemsize=18)
    # Convert dict to an actual 1D array, where you have the right word at the right index
    for k, v in vocab.items():
        vocab_array[v] = k
    # Get the sorted indices
    ind = X.argsort(axis=1)
    out = np.chararray((ind.shape[0], n), itemsize=18)
    # For each row in ind
    for i in range(ind.shape[0]):
        # Grab the row from ind (this is the ordering needed to sort the words)
        ind_row = ind[i, :]
        # Index the 1D word array with the row from ind, which sorts the words
        # according to the counts from X
        sorted_words = vocab_array[ind_row]
        # Grab the last n values (the most weighted words) using [-n:]
        out[i, :] = sorted_words[-n:]
    top = pd.DataFrame(out, index=abbrev, columns=np.arange(n, 0, -1))
    if save:
        top.to_csv("top_{}_words.csv".format(n))
    return top
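The chararray-based vocabulary inversion used above can be illustrated in isolation; a small sketch, assuming scikit-learn's CountVectorizer and two toy documents:

import numpy as np
from sklearn.feature_extraction.text import CountVectorizer

docs = ["the cat sat on the mat", "the dog chased the cat"]
cv = CountVectorizer()
X = cv.fit_transform(docs).toarray()          # term-document counts, shape (2, n_words)

# Invert the vocabulary dict ('word' -> column index) into a chararray of words.
vocab_array = np.chararray((len(cv.vocabulary_),), itemsize=18)
for word, idx in cv.vocabulary_.items():
    vocab_array[idx] = word

# Sorting column indices by count per row puts the most frequent words last.
order = X.argsort(axis=1)
print(vocab_array[order[0, -3:]])             # top 3 words of the first document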
Example 5: visualize
def visualize(rings):
    # the position of each ball is found using the equation of a circle
    theta = 2 * math.pi / 20
    radius = 5
    xdata_left = np.zeros(20)
    ydata_left = np.zeros(20)
    xdata_right = np.zeros(20)
    ydata_right = np.zeros(20)
    plt.clf()
    # get the positions for the left and right rings
    for ix in xrange(0, 20):
        xdata_left[ix] = 5 + (radius * math.cos((ix + 5 * math.pi / 6) * theta))
        ydata_left[ix] = 5 + (radius * math.sin((ix + 5 * math.pi / 6) * theta))
        xdata_right[ix] = 12.08 + (radius * math.cos((ix + 63 * math.pi / 16) * -theta))
        ydata_right[ix] = 5.3 + (radius * math.sin((ix + 63 * math.pi / 16) * -theta))
    # set up the colors for the left and right rings
    colors_left = np.chararray(20)
    colors_right = np.chararray(20)
    for ix in xrange(0, 20):
        if rings[0][ix] == 0:
            colors_left[ix] = 'r'
        elif rings[0][ix] == 1:
            colors_left[ix] = 'b'
        elif rings[0][ix] == 2:
            colors_left[ix] = 'y'
        elif rings[0][ix] == 3:
            colors_left[ix] = 'k'
        if rings[1][ix] == 0:
            colors_right[ix] = 'r'
        elif rings[1][ix] == 1:
            colors_right[ix] = 'b'
        elif rings[1][ix] == 2:
            colors_right[ix] = 'y'
        elif rings[1][ix] == 3:
            colors_right[ix] = 'k'
    # plot each ball individually to get the correct color
    for ix in xrange(0, 20):
        plt.scatter(xdata_left[ix], ydata_left[ix], 1500, colors_left[ix])
        plt.scatter(xdata_right[ix], ydata_right[ix], 1500, colors_right[ix])
    plt.axis('equal')
    plt.show()
    plt.draw()
Example 6: _setup_krd
def _setup_krd(*args):
    krd = np.chararray((10, 8), order='F')
    for i in xrange(10):
        _chrcopy(krd, i, " ")
    for i, arg in enumerate(args):
        _chrcopy(krd, i, arg)
    return krd
Example 7: main
def main():
    args = parse_args()
    N = args.N
    out = args.out
    #out='/home/vorberg/test.psc'
    #N=1000
    msa = np.chararray((N, 4))
    set_1 = [io.AMINO_ACIDS[a] for a in np.random.choice(range(1, 11), N/2)]
    set_2 = [io.AMINO_ACIDS[a] for a in np.random.choice(range(11, 21), N/2)]
    set_3 = [io.AMINO_ACIDS[21 - io.AMINO_INDICES[a]] for a in set_2]
    set_4 = [io.AMINO_ACIDS[21 - io.AMINO_INDICES[a]] for a in set_1]
    msa[:, 0] = set_4 + set_3
    msa[:, 1] = set_1 + set_2
    msa[:, 2] = set_2 + set_1
    msa[:, 3] = set_3 + set_4
    np.savetxt(out, msa, delimiter='', newline='\n', fmt='%s')
Example 8: levelGen
def levelGen(size):
    world = np.chararray((size, size))
    world[:] = '.'
    world[0, :] = '|'
    world[-1, :] = '|'
    world[:, 0] = '|'
    world[:, -1] = '|'
    def r(): return random.randint(1, size - 2)
    area = size ** 2
    density = float(world.count('|').sum() - size * 4) / float(area)
    '''
    while density < 0.15:
        x, y = r(), r()
        for i in range(random.randint(1, 7)):
            world[x % size, y % size] = '|'
            next = random.choice([-1, 1])
            if random.randint(0, 1):
                x += next
            else:
                y += next
        density = float(world.count('|').sum() - size * 4) / float(area)
    '''
    # sets spawnpoint
    world[r(), r()] = random.choice(['N', 'E', 'S', 'W'])
    world = '\n'.join(map(''.join, world.tolist()))
    return world
Example 9: _get_dates
def _get_dates(self, uid, lim):
    locations_index = np.transpose(self._locations[:, USER_ID] == int(uid))
    date_locations = self._locations[locations_index, :]
    user_locations = date_locations[:, (LATITUDE, LONGITUDE, ORDINAL_DATE)]
    user_location_size = np.shape(user_locations)
    datedanszone = []
    for i in range(int(user_location_size[0])):
        contains = self._inside(lim, user_locations[i, 0], user_locations[i, 1])
        if contains:
            datedanszone.append(user_locations[i, 2])
    datedanszone = list(set(datedanszone))
    if len(datedanszone) == 0:
        return {}
    datematrix = np.chararray((len(datedanszone), 2), itemsize=30)
    week = ['Lundi', 'Mardi', 'Mercredi', 'Jeudi', 'Vendredi', 'Samedi', 'Dimanche']
    month = ['Janvier', 'Fevrier', 'Mars', 'Avril', 'Mai', 'Juin', 'Juillet', 'Aout', 'Septembre', 'Octobre', 'Novembre', 'Decembre']
    i = 0
    for d in datedanszone:
        datematrix[i, 0] = int(d)
        datedisp = dt.datetime.fromordinal(int(d))
        datematrix[i, 1] = week[datedisp.weekday()] + " " + str(datedisp.day) + " " + month[datedisp.month - 1] + " " + str(datedisp.year)
        i = i + 1
    return {
        "availableOptionsForDate": [{"date": d, "datedisp": dd} for d, dd in datematrix],
        "selectedOptionfordate": {"date": datedanszone[0]}
    }
Example 10: depth_count
def depth_count():
    ops = ['+', '-', '*', '/', '^']
    parantheses = ['(', ')']
    input = raw_input('*')
    j = 0
    k = 0
    l = 0
    processed = np.chararray((len(input), 1))
    oplist = []
    parlist = []
    for e in input:
        if e in ops:
            oplist.append(e)
            processed[j, 0] = k
            k = k + 1
        else:
            if e in parantheses:
                oplist.append(0)
                parlist.append(e)
                processed[j, 0] = l
                l = l + 1
            else:
                processed[j, 0] = e
        j = j + 1
    print processed
    print oplist
    print parlist
    return k
Example 11: toChararray
def toChararray(arr, aligned=False):
    arr = array(arr, dtype='|S')
    try:
        ndim, dtype_, shape = arr.ndim, arr.dtype, arr.shape
    except AttributeError:
        raise TypeError('arr is not a Numpy array')
    if ndim < 1:
        raise ValueError('arr.ndim should be at least 1')
    if dtype_.char != 'S':
        raise ValueError('arr must be a character array')
    if ndim != 2:
        n_seq = shape[0]
        l_seq = dtype_.itemsize
        new_arr = chararray((n_seq, l_seq))
        for i, s in enumerate(arr):
            for j in range(l_seq):
                if j < len(s):
                    new_arr[i, j] = chr2(s[j])
                else:
                    if aligned:
                        raise ValueError('arr sequences do not all have the same length')
                    new_arr[i, j] = '.'
    else:
        new_arr = array(arr, dtype='|S1')
    return new_arr
Example 12: main
def main():
    """ Updates given HDF5 with readme text provided in a text file.
    Text gets saved as attribute "readme" in the root group.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--h5file", help="HDF5 File to be updated")
    parser.add_argument("--readme", help="Text file with readme content")
    args = parser.parse_args()
    if not args.h5file:
        print("No HDF5 given")
        return -1
    if not args.readme:
        print("No readme file given")
        return -1
    f = h5py.File(args.h5file, 'a')
    with open(args.readme, 'r', encoding="latin-1") as readme_file:
        text = readme_file.read()
    char_array = np.chararray((), itemsize=len(text))
    char_array[()] = text
    #print(char_array)
    f.attrs.create('readme', char_array)
    f.close()
    print("bye")
Example 13: create_iso
def create_iso(fileList, ageList, rot=True):
    """
    Given a set of isochrone files downloaded from
    http://obswww.unige.ch/Recherche/evoldb/index/Isochrone/, put in correct
    iso.fits format for parse_iso code.
    fileList: list of downloaded isochrone files (could be one)
    ageList: list of lists of ages associated with each file in fileList.
    MUST BE IN SAME ORDER AS ISOCHRONES IN FILE! Also needs to be in logAge
    rot = TRUE: assumes that models are rotating, will add appropriate column
    This code writes the individual files, which are then easiest to combine by hand
    in aquamacs
    """
    # Read each file in fileList individually, add necessary columns
    for i in range(len(fileList)):
        t = Table.read(fileList[i], format='ascii')
        ages = ageList[i]
        # Find places where new models start; mass here is assumed to be 0.8
        start = np.where(t['M_ini'] == 0.8)
        # Now, each identified start is assumed to be associated with the
        # corresponding age in ages
        if len(start[0]) != len(ages):
            print 'Ages mismatched in file! Quitting...'
            return
        age_arr = np.zeros(len(t))
        for j in range(len(start[0])):
            low_ind = start[0][j]
            # Deal with case at end of file
            if (j == len(start[0]) - 1):
                high_ind = len(t)
            else:
                high_ind = start[0][j+1]
            ind = np.arange(low_ind, high_ind, 1)
            age_arr[ind] = ages[j]
        # Add the age_arr column as column 1 in the isochrone, as well as a column
        # signifying rotation
        col_age = Column(age_arr, name='logAge')
        rot_val = np.chararray(len(t))
        rot_val[:] = 'r'
        if not rot:
            rot_val[:] = 'n'
        col_rot = Column(rot_val, name='Rot')
        t.add_column(col_rot, index=0)
        t.add_column(col_age, index=0)
        t.write('tmp' + str(i) + '.fits')
    return
Example 14: __init__
def __init__(self, filename):
    """
    Args:
        filename (str): name of a file in the FITS format for targets.
    """
    hdulist = fits.open(filename)
    self.filename = filename
    self.ra = hdulist[1].data['RA']
    self.dec = hdulist[1].data['DEC']
    self.type = hdulist[1].data['OBJTYPE']
    self.id = np.int_(hdulist[1].data['TARGETID'])
    self.tile_ra = hdulist[1].header['TILE_RA']
    self.tile_dec = hdulist[1].header['TILE_DEC']
    self.tile_id = hdulist[1].header['TILE_ID']
    self.n = np.size(self.ra)
    fc = desimodel.focalplane.FocalPlane(ra=self.tile_ra, dec=self.tile_dec)
    self.x, self.y = fc.radec2xy(self.ra, self.dec)
    # this is related to the fiber assignment
    self.fiber = -1.0 * np.ones(self.n, dtype='i4')
    # This section is related to the number of times a galaxy has been observed,
    # the assigned redshift and the assigned type
    self.n_observed = np.zeros(self.n, dtype='i4')
    self.assigned_z = -1.0 * np.ones(self.n)
    self.assigned_type = np.chararray(self.n, itemsize=8)
    self.assigned_type[:] = 'NONE'
Example 15: test__merge_cols
def test__merge_cols(self):
    cs = ColSplitter()
    cs._token_col_types = [cs._int, cs._float]
    cs._token_col_lengths = [-1, -1]
    charr = np.chararray((7, 2), 5)
    charr[0, 0] = cs._null
    charr[1, 0] = '23'
    charr[2, 0] = cs._null
    charr[3, 0] = cs._null
    charr[4, 0] = '42'
    charr[5, 0] = '123'
    charr[6, 0] = cs._null
    charr[0, 1] = '12.0'
    charr[1, 1] = cs._null
    charr[2, 1] = '13.0'
    charr[3, 1] = cs._null
    charr[4, 1] = cs._null
    charr[5, 1] = cs._null
    charr[6, 1] = cs._null
    res = cs._merge_cols(charr)
    # self.assertEqual((5, 1), res.shape)
    self.assertEqual(b'12', res[0, 0])
    self.assertEqual(b'23', res[1, 0])
    self.assertEqual(b'13', res[2, 0])
    self.assertEqual(cs._null, res[3, 0])
    self.assertEqual(b'42', res[4, 0])