This article collects typical usage examples of Python's numpy.require function. If you have been wondering what numpy.require does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.
Fifteen code examples of the require function are shown below, drawn from real projects and sorted by popularity by default.
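Before the examples, a minimal sketch of what np.require does: it returns an array satisfying the given dtype and flag requirements ('C' C-contiguous, 'F' Fortran-contiguous, 'A' aligned, 'W' writeable, 'O' owns its data), copying the input only when necessary. This is a generic illustration, not taken from any of the projects below:

    import numpy as np

    x = np.arange(6).reshape(2, 3).T        # a transposed view: not C-contiguous
    y = np.require(x, dtype=np.float64, requirements=['C', 'W'])
    print(x.flags['C_CONTIGUOUS'])  # False
    print(y.flags['C_CONTIGUOUS'])  # True: np.require made a C-ordered float64 copy
    z = np.require(y, dtype=np.float64, requirements='C')
    print(z is y)                   # True: requirements already met, so no copy is made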
Example 1: read_wfsx
def read_wfsx(self, fname, **kw):
    """ Read a SIESTA .WFSX file on demand """
    from pyscf.nao.m_siesta_wfsx import siesta_wfsx_c
    from pyscf.nao.m_siesta2blanko_denvec import _siesta2blanko_denvec
    from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations

    self.wfsx = siesta_wfsx_c(fname=fname, **kw)
    assert self.nkpoints == self.wfsx.nkpoints
    assert self.norbs == self.wfsx.norbs
    assert self.nspin == self.wfsx.nspin

    orb2m = self.get_orb2m()
    for k in range(self.nkpoints):
        for s in range(self.nspin):
            for n in range(self.norbs):
                _siesta2blanko_denvec(orb2m, self.wfsx.x[k, s, n, :, :])

    self.mo_coeff = np.require(self.wfsx.x, dtype=self.dtype, requirements='CW')
    self.mo_energy = np.require(self.wfsx.ksn2e, dtype=self.dtype, requirements='CW')
    self.telec = kw['telec'] if 'telec' in kw else self.hsx.telec
    self.nelec = kw['nelec'] if 'nelec' in kw else self.hsx.nelec
    self.fermi_energy = kw['fermi_energy'] if 'fermi_energy' in kw else self.fermi_energy
    ksn2fd = fermi_dirac_occupations(self.telec, self.mo_energy, self.fermi_energy)
    self.mo_occ = (3 - self.nspin) * ksn2fd
    return self
Example 2: check_distance
def check_distance(self, parent_ix, coords):
    '''Check that the distance between the coordinates `coords` and the center
    `parent_ix` is smaller than the distance to any other center at the parent level.'''
    if parent_ix is None:
        return True

    # Coerce the input to the expected coordinate dtype only if necessary.
    try:
        passed_coord_dtype = coords.dtype
    except AttributeError:
        coords = np.require(coords, dtype=coord_dtype)
    else:
        if passed_coord_dtype != coord_dtype:
            coords = np.require(coords, dtype=coord_dtype)

    coords = coords.reshape((1, -1))
    assert len(coords) == 1

    parent_level = self.bin_graph.node[parent_ix]['level']
    level_indices = self.level_indices[parent_level]
    parent_centers = self.fetch_centers(level_indices)

    mask = np.ones((1,), dtype=np.bool_)
    output = np.empty((1,), dtype=index_dtype)
    min_dist = np.empty((1,), dtype=coord_dtype)
    self._assign_level(coords, parent_centers, mask, output, min_dist)
    res = output[0] == level_indices.index(parent_ix)
    return res
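The try/except dance above is a common way to accept either ndarrays or plain sequences. A minimal standalone sketch of the same coercion (here coord_dtype is assumed to be np.float64; in the project it is defined elsewhere in the module):

    import numpy as np

    coord_dtype = np.float64  # assumption standing in for the module-level dtype

    def as_coords(coords):
        # np.require returns `coords` unchanged when it is already an ndarray of
        # the right dtype, and otherwise converts (copying only if needed).
        try:
            passed = coords.dtype
        except AttributeError:
            return np.require(coords, dtype=coord_dtype)
        return np.require(coords, dtype=coord_dtype) if passed != coord_dtype else coords

    print(as_coords([1, 2, 3]).dtype)                 # float64: list converted
    print(as_coords(np.zeros(3, np.float32)).dtype)   # float64: recast from float32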
Example 3: compute_v_without_derivs
def compute_v_without_derivs(self, Xs, Yinvs, Ts):
    # Turn the parts of omega into contiguous, writable arrays and move them to the GPU
    Xs = np.require(Xs, dtype=np.double, requirements=['A', 'W', 'O', 'C'])
    Yinvs = np.require(Yinvs, dtype=np.double, requirements=['A', 'W', 'O', 'C'])
    Ts = np.require(Ts, dtype=np.double, requirements=['A', 'W', 'O', 'C'])
    Xs_d = gpuarray.to_gpu(Xs)
    Yinvs_d = gpuarray.to_gpu(Yinvs)
    Ts_d = gpuarray.to_gpu(Ts)
    # N = the number of integer points to sum over
    # K = the number of different omegas at which to compute the function
    N = self.Sd.size // self.g
    K = Xs.size // (self.g ** 2)
    # Create room on the GPU for the real and imaginary finite-sum results
    fsum_reald = gpuarray.zeros(N * K, dtype=np.double)
    fsum_imagd = gpuarray.zeros(N * K, dtype=np.double)
    # Turn all scalars into numpy data types
    Nd = np.int32(N)
    Kd = np.int32(K)
    gd = np.int32(self.g)
    blocksize = (self.tilewidth, self.tileheight, 1)
    gridsize = (N // self.tilewidth + 1, K // self.tileheight + 1, 1)
    self.finite_sum_without_derivs(fsum_reald, fsum_imagd, Xs_d, Yinvs_d, Ts_d,
                                   self.Sd, gd, Nd, Kd,
                                   block=blocksize,
                                   grid=gridsize)
    cuda.Context.synchronize()
    fsums_real = self.sum_reduction(fsum_reald, N, K, Kd, Nd)
    fsums_imag = self.sum_reduction(fsum_imagd, N, K, Kd, Nd)
    return fsums_real + 1.0j * fsums_imag
Example 4: load_data
def load_data(self, model_data, callback=None):
    t_start = time.time()
    vertices, normals = model_data
    # convert python lists to numpy arrays for constructing vbos
    self.vertices = numpy.require(vertices, 'f')
    self.normals = numpy.require(normals, 'f')
    self.scaling_factor = 1.0
    self.rotation_angle = {
        self.AXIS_X: 0.0,
        self.AXIS_Y: 0.0,
        self.AXIS_Z: 0.0,
    }
    self.mat_specular = (1.0, 1.0, 1.0, 1.0)
    self.mat_shininess = 50.0
    self.light_position = (20.0, 20.0, 20.0)
    self.initialized = False
    t_end = time.time()
    logging.info('Initialized STL model in %.2f seconds' % (t_end - t_start))
    logging.info('Vertex count: %d' % len(self.vertices))
Example 5: setdiff_rows
def setdiff_rows(A, B, return_index=False):
    """
    Similar to MATLAB's setdiff(A, B, 'rows'), this returns C, I
    where C holds the rows of A that are not in B and I satisfies
    C = A[I, :].

    Returns I if return_index is True.
    """
    A = np.require(A, requirements='C')
    B = np.require(B, requirements='C')

    assert A.ndim == 2, "array must be 2-dim'l"
    assert B.ndim == 2, "array must be 2-dim'l"
    assert A.shape[1] == B.shape[1], \
        "arrays must have the same number of columns"
    assert A.dtype == B.dtype, \
        "arrays must have the same data type"

    # NumPy provides setdiff1d, which operates only on one-dimensional
    # arrays. To make each row a single element, we view it as a string
    # of bytes of the appropriate length.
    orig_dtype = A.dtype
    ncolumns = A.shape[1]
    dtype = np.dtype((np.character, orig_dtype.itemsize * ncolumns))
    C = np.setdiff1d(A.view(dtype), B.view(dtype)) \
          .view(A.dtype) \
          .reshape((-1, ncolumns), order='C')

    if return_index:
        raise NotImplementedError
    else:
        return C
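As a quick check of the row-wise set difference above, here is a minimal usage sketch (assuming setdiff_rows is importable as written, on a NumPy 1.x release where the np.character alias it uses still exists):

    import numpy as np

    A = np.array([[1, 2], [3, 4], [5, 6]])
    B = np.array([[3, 4]])
    # Both arrays share a dtype and column count, as the asserts require.
    print(setdiff_rows(A, B))  # [[1 2] [5 6]]: the rows of A not present in B

Note that np.require(..., requirements='C') matters here: the byte-string view trick is only valid on C-contiguous rows.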
Example 6: get_next_batch
def get_next_batch(self):
    self.advance_batch()
    epoch = self.curr_epoch
    batchnum = self.curr_batchnum
    datadic = leveldb.LevelDB(self.data_dir + '/batch-%d' % batchnum)
    img_raw = []
    label_raw = []
    for k, pickled in datadic.RangeIter():
        imgdata = cPickle.loads(pickled)
        img_raw.append(Image.open(c.StringIO(imgdata['data'])))
        label_raw.append(imgdata['label'])
    labels = n.array(label_raw)
    images = n.ndarray((len(img_raw), 64 * 64 * 3), dtype=n.single)
    for idx, jpegdata in enumerate(img_raw):
        # flatten each decoded image into one row of the data matrix
        images[idx] = n.array(jpegdata, dtype=n.single).reshape(-1)
    print(labels.shape)
    print(images.shape)
    images = n.require(images, dtype=n.single, requirements='C')
    labels = labels.reshape((1, images.shape[0]))
    labels = n.require(labels, dtype=n.single, requirements='C')
    return epoch, batchnum, [images, labels]
Example 7: bench
def bench():
    size = 256
    nframes = 4000
    lag = 24
    X = N.random.randn(nframes, size)
    X = N.require(X, requirements='C')
    niter = 10

    # Contiguous
    print("Running optimized with ctypes")
    def contig(*args, **kargs):
        return autocorr_oneside_nofft(*args, **kargs)
    for i in range(niter):
        Yt = contig(X, lag, axis=1)
    Yr = _autocorr_oneside_nofft_py(X, lag, axis=1)
    N.testing.assert_array_almost_equal(Yt, Yr, 10)

    # Non contiguous
    print("Running optimized with ctypes (non contiguous)")
    def ncontig(*args, **kargs):
        return autocorr_oneside_nofft(*args, **kargs)
    X = N.require(X, requirements='F')
    for i in range(niter):
        Yt = ncontig(X, lag, axis=1)
    Yr = _autocorr_oneside_nofft_py(X, lag, axis=1)
    N.testing.assert_array_almost_equal(Yt, Yr, 10)
    print("Benchmark func done")
Example 8: make_predictions
def make_predictions(net, data, labels, num_classes):
    from functools import reduce  # reduce is no longer a builtin on Python 3

    data = np.require(data, requirements='C')
    labels = np.require(labels, requirements='C')

    preds = np.zeros((data.shape[1], num_classes), dtype=np.single)
    softmax_idx = net.get_layer_idx('probs', check_type='softmax')

    t0 = time.time()
    net.libmodel.startFeatureWriter(
        [data, labels, preds], softmax_idx)
    net.finish_batch()
    print("Predicted %s cases in %.2f seconds." % (
        labels.shape[1], time.time() - t0))

    if net.multiview_test:
        # We have to deal with num_samples * num_views predictions.
        num_views = net.test_data_provider.num_views
        num_samples = labels.shape[1] // num_views
        split_sections = range(
            num_samples, num_samples * num_views, num_samples)
        preds = np.split(preds, split_sections, axis=0)
        labels = np.split(labels, split_sections, axis=1)
        preds = reduce(np.add, preds)
        labels = labels[0]

    return preds, labels
Example 9: __init__
def __init__(self, data_dir,
             img_size, num_colors,  # options added on top of the cifar data provider
             batch_range=None,
             init_epoch=1, init_batchnum=None, dp_params=None, test=False):
    LabeledMemoryDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)
    self.num_colors = num_colors
    self.img_size = img_size
    self.border_size = dp_params['crop_border']
    self.inner_size = self.img_size - self.border_size * 2
    self.multiview = dp_params['multiview_test'] and test
    self.img_flip = dp_params['img_flip']
    if self.img_flip:
        self.num_views = 5 * 2
    else:
        self.num_views = 5
    self.data_mult = self.num_views if self.multiview else 1

    for d in self.data_dic:
        d['data'] = n.require(d['data'], requirements='C')
        d['labels'] = n.require(n.tile(d['labels'].reshape((1, d['data'].shape[1])), (1, self.data_mult)), requirements='C')

    self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1] * self.data_mult), dtype=n.single) for x in range(2)]
    self.batches_generated = 0
    self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors, self.img_size, self.img_size))[:, self.border_size:self.border_size + self.inner_size, self.border_size:self.border_size + self.inner_size].reshape((self.get_data_dims(), 1))
Example 10: _lpc2_py
def _lpc2_py(signal, order, axis=-1):
    """Python implementation of lpc for rank 2. Do not use; for testing purposes only."""
    if signal.ndim > 2:
        raise NotImplementedError("only for rank <= 2")
    if signal.ndim < 2:
        return lpc(_N.require(signal, requirements='C'), order)

    # For each array along the given axis, run Levinson-Durbin
    if axis % 2 == 0:
        # Prepare output arrays
        coeff = _N.zeros((order + 1, signal.shape[1]), signal.dtype)
        kcoeff = _N.zeros((order, signal.shape[1]), signal.dtype)
        err = _N.zeros(signal.shape[1], signal.dtype)
        for i in range(signal.shape[1]):
            coeff[:, i], err[i], kcoeff[:, i] = \
                lpc(_N.require(signal[:, i], requirements='C'), order)
    elif axis % 2 == 1:
        # Prepare output arrays
        coeff = _N.zeros((signal.shape[0], order + 1), signal.dtype)
        kcoeff = _N.zeros((signal.shape[0], order), signal.dtype)
        err = _N.zeros(signal.shape[0], signal.dtype)
        for i in range(signal.shape[0]):
            coeff[i], err[i], kcoeff[i] = \
                lpc(_N.require(signal[i], requirements='C'), order)
    else:
        raise RuntimeError("this should not happen, please file a bug")

    return coeff, err, kcoeff
Example 11: print_predictions
def print_predictions(self):
    data = self.get_next_batch(train=False)[2]  # get a test batch
    num_classes = self.test_data_provider.get_num_classes()
    softmax_idx = self.get_layer_idx('probs', check_type='softmax')

    NUM_IMGS = 1
    NUM_TOP_CLASSES = min(num_classes, 4)  # show this many top labels
    label_names = self.test_data_provider.batch_meta['label_names']

    preds = n.zeros((NUM_IMGS, num_classes), dtype=n.single)
    rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
    data[0] = n.require(data[0][:, rand_idx], requirements='C')
    data[1] = n.require(data[1][:, rand_idx], requirements='C')
    data += [preds]

    # Run the model
    self.libmodel.startFeatureWriter(data, softmax_idx)
    self.finish_batch()

    data[0] = self.test_data_provider.get_plottable_data(data[0])
    img_idx = 0
    true_label = int(data[1][0, img_idx])
    img_labels = sorted(zip(preds[img_idx, :], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
    print("true_label=%s" % label_names[true_label])
    for l in img_labels:
        print("l=%s" % str(l))

    binary_checkpoint_file = "binary_%d.%d.ntwk" % (self.epoch, self.batchnum)
    self.save_as_binary(binary_checkpoint_file)
Example 12: get_next_batch
def get_next_batch(self):
    self.get_next_index()
    epoch = self.curr_epoch
    filename = os.path.join(self.data_dir, 'data_batch_%d' % self.curr_batch)
    start = time.time()

    if os.path.isdir(filename):
        images = []
        labels = []
        for sub_filename in os.listdir(filename):
            path = os.path.join(filename, sub_filename)
            data = util.load(path)
            images.extend(data['data'])
            labels.extend(data['labels'])
        data['data'] = images
        data['labels'] = labels
    else:
        data = util.load(filename)
    data = self.__multigpu_seg(data)
    images = data['data']
    labels = data['labels']  # read labels from the (possibly merged) batch dict

    cropped = np.ndarray((3, self.inner_size, self.inner_size, len(images) * self.num_view), dtype=np.float32)
    self.__decode_trim_images2(images, cropped)
    cropped = garray.reshape_last(cropped) - self.data_mean
    cropped = np.require(cropped.reshape((3, self.inner_size, self.inner_size, len(images) * self.num_view)), dtype=np.single, requirements='C')

    labels = np.array(labels)
    labels = labels.reshape(labels.size)
    labels = np.require(labels, dtype=np.single, requirements='C')
    return BatchData(cropped, labels, epoch)
Example 13: get_next_batch
def get_next_batch(self):
    epoch, batchnum, d = LabeledDataProvider.get_next_batch(self)
    # Convert the data matrix to single precision and make sure it is C-ordered
    d['data'] = n.require(d['data'].transpose(), dtype=n.single, requirements='C')
    d['labels'] = n.require(d['labels'].reshape((1, d['data'].shape[1])), dtype=n.single, requirements='C')
    return epoch, batchnum, [d['data'], d['labels']]
Example 14: slit_uniform_psf
def slit_uniform_psf(n, seeing, mu_x, mu_y, tau_0, slit_width, slit_height, plot=False):
    """Return x- and y-coordinate arrays of points drawn uniformly at random
    from a 2D circle.

    Parameters
    ----------
    n : int
        Size of coordinate arrays.
    seeing : double
        Seeing of source psf in arcseconds.
    mu_x : double
        Center of PSF in x-coords.
    mu_y : double
        Center of PSF in y-coords.
    tau_0 : double
        Rotation about z-axis (tilt).
    slit_width : double
        Width of slit in arcseconds.
    slit_height : double
        Height of slit in arcseconds.
    plot : bool, optional
        If True, show a preview scatter plot of the sampled points.

    Returns
    -------
    slit_x : array_like
        Array of x-coordinates.
    slit_y : array_like
        Array of y-coordinates.
    """
    desc = "Source psf: uniform, mux=%.2f muy=%.2f seeing=%.2f arcsec" % (mu_x, mu_y, seeing)
    log.info(desc)

    # initialize output arrays to send to the C function
    slit_x = np.empty(n, dtype=np.float64)
    slit_y = np.empty(n, dtype=np.float64)
    slit_x = np.require(slit_x, requirements=ci.req_out, dtype=np.float64)
    slit_y = np.require(slit_y, requirements=ci.req_out, dtype=np.float64)

    func = ci.slitc.slit_uniform_psf
    func.argtypes = [
        ct.c_int,            # n
        ct.c_double,         # seeing
        ct.c_double,         # mu_x
        ct.c_double,         # mu_y
        ct.c_double,         # tau_0
        ct.c_double,         # slit_width
        ct.c_double,         # slit_height
        ci.array_1d_double,  # slit_x
        ci.array_1d_double]  # slit_y
    func.restype = None

    log.info("Slit Rejection Sampling: %s rays...", n)
    func(n, seeing, mu_x, mu_y, tau_0, slit_width, slit_height, slit_x, slit_y)

    # preview slit
    if plot:
        log.info("Opening preview plot of 2D uniformly random psf.")
        import matplotlib.pylab as plt
        fig = plt.figure()
        ax = fig.add_subplot(111)  # aspect='equal' disabled
        ax.scatter(slit_x, slit_y, s=20, edgecolor=None)
        plt.title("0D Point Source PSF")
        plt.show()

    return slit_x, slit_y
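The pattern above, allocating an output buffer with np.empty and passing it through np.require before handing it to a foreign function, guarantees the buffer satisfies the flags the C side expects. A minimal self-contained sketch of the same idea (the ['A', 'W', 'O', 'C'] requirement set below is an assumption standing in for ci.req_out, which is defined elsewhere in that project):

    import numpy as np

    # A Fortran-ordered array would fail a C-contiguity check;
    # np.require copies only when a requested flag is not already met.
    a = np.asfortranarray(np.arange(12, dtype=np.float64).reshape(3, 4))
    out = np.require(a, dtype=np.float64, requirements=['A', 'W', 'O', 'C'])
    print(a.flags['C_CONTIGUOUS'])    # False for the 3x4 Fortran-ordered array
    print(out.flags['C_CONTIGUOUS'])  # True: a C-ordered copy was made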
Example 15: comp_apair_pp_libint
def comp_apair_pp_libint(self, a1, a2):
    """ Gets the vertex coefficients and conversion coefficients for a pair of atoms given by their atom indices """
    from operator import mul
    from pyscf.nao.m_prod_biloc import prod_biloc_c

    if not hasattr(self, 'sv_pbloc_data'):
        raise RuntimeError('.sv_pbloc_data is absent')
    assert a1 >= 0
    assert a2 >= 0

    t1 = timer()
    sv = self.sv
    aos = self.sv.ao_log
    sp12 = np.require(np.array([sv.atom2sp[a] for a in (a1, a2)], dtype=c_int64), requirements='C')
    rc12 = np.require(np.array([sv.atom2coord[a, :] for a in (a1, a2)]), requirements='C')
    icc2a = np.require(np.array(self.ls_contributing(a1, a2), dtype=c_int64), requirements='C')
    npmx = aos.sp2norbs[sv.atom2sp[a1]] * aos.sp2norbs[sv.atom2sp[a2]]
    npac = sum([self.prod_log.sp2norbs[sv.atom2sp[ia]] for ia in icc2a])
    nout = c_int64(npmx ** 2 + npmx * npac + 10)
    dout = np.require(np.zeros(nout.value), requirements='CW')

    libnao.vrtx_cc_apair(sp12.ctypes.data_as(POINTER(c_int64)),
                         rc12.ctypes.data_as(POINTER(c_double)),
                         icc2a.ctypes.data_as(POINTER(c_int64)),
                         c_int64(len(icc2a)),
                         dout.ctypes.data_as(POINTER(c_double)), nout)
    if dout[0] < 1:
        return None

    nnn = np.array(dout[0:3], dtype=int)
    nnc = np.array([dout[8], dout[7]], dtype=int)
    ncc = int(dout[9])
    if ncc != len(icc2a):
        raise RuntimeError('ncc != len(icc2a)')

    s = 10; f = s + np.prod(nnn); vrtx = dout[s:f].reshape(nnn)
    s = f; f = s + np.prod(nnc); ccoe = dout[s:f].reshape(nnc)

    icc2s = np.zeros(len(icc2a) + 1, dtype=np.int64)
    for icc, a in enumerate(icc2a):
        icc2s[icc + 1] = icc2s[icc] + self.prod_log.sp2norbs[sv.atom2sp[a]]
    pbiloc = prod_biloc_c(atoms=np.array([a2, a1]), vrtx=vrtx, cc2a=icc2a, cc2s=icc2s, cc=ccoe)
    return pbiloc