This article collects and summarizes typical usage examples of the numpy.resize function in Python. If you are struggling with questions like how exactly to use resize, or what it can be used for, the curated code examples below may help.
The following section presents 15 code examples of the resize function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
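Before diving into the examples, it helps to recall what numpy.resize actually does: unlike reshape, it accepts a target size different from the input's, repeating the flattened data to fill a larger array and truncating it for a smaller one. A minimal self-contained demonstration:

import numpy as np

a = np.array([1, 2, 3, 4])
print(np.resize(a, (2, 4)))  # [[1 2 3 4] [1 2 3 4]] -- data repeated to fill
print(np.resize(a, (3,)))    # [1 2 3] -- excess data truncated

Note that the free function numpy.resize differs from the ndarray.resize method, which pads new elements with zeros instead of repeating data.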
Example 1: genImg

def genImg(self, xml, compressionLevel):
    # requires: import re, base64, zlib, numpy
    xml = re.split('\n', xml)
    img_data = re.split('"', xml[1])
    type = img_data[1]
    size = [int(x) for x in img_data[3].split(',')]
    compressed = img_data[5]
    pixels = xml[2]
    pixels = base64.b64decode(pixels)  # decode the base64 encoding
    if compressed == 'True':
        pixels = zlib.decompress(pixels)  # if the data was compressed, decompress it
    pixels = list(pixels)  # convert the byte data into a list, giving the actual numbers of the ndarray (i.e. the image)
    if type == 'Gray':  # based on the image type, reconstruct the numpy.ndarray
        return numpy.resize(pixels, tuple(size))
    else:
        r = pixels[:size[0]*size[1]]
        g = pixels[size[0]*size[1]:2*size[0]*size[1]]
        b = pixels[2*size[0]*size[1]:3*size[0]*size[1]]
        image = []
        for i in range(size[0] * size[1]):
            image.append(r[i])
            image.append(g[i])
            image.append(b[i])
        size.append(3)
        return numpy.resize(image, tuple(size))
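In this example numpy.resize acts as a plain reshape, since the flattened pixel list contains exactly size[0]*size[1] (or three times that, for RGB) elements and no repetition occurs. A small sketch of the final step with hypothetical pixel values:

import numpy

pixels = [10, 20, 30, 40, 50, 60]               # hypothetical interleaved R,G,B values
size = [1, 2, 3]                                # 1 row, 2 columns, 3 channels
print(numpy.resize(pixels, tuple(size)).shape)  # (1, 2, 3)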
Example 2: batchsd

import numpy as np

def batchsd(trace, batches=5):
    """
    Calculates the simulation standard error, accounting for non-independent
    samples. The trace is divided into batches, and the standard deviation of
    the batch means is calculated.
    """
    if len(np.shape(trace)) > 1:
        dims = np.shape(trace)
        # ttrace = np.transpose(np.reshape(trace, (dims[0], sum(dims[1:]))))
        ttrace = np.transpose([t.ravel() for t in trace])
        return np.reshape([batchsd(t, batches) for t in ttrace], dims[1:])
    else:
        if batches == 1:
            return np.std(trace) / np.sqrt(len(trace))
        # If the batches do not divide evenly, trim the excess samples first;
        # np.resize would otherwise recycle early samples to fill the shape.
        # (The integer division // also keeps the shape valid under Python 3.)
        resid = len(trace) % batches
        if resid:
            trace = trace[:-resid]
        batched_traces = np.resize(trace, (batches, len(trace) // batches))
        means = np.mean(batched_traces, 1)
        return np.std(means) / np.sqrt(batches)
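A possible usage sketch, assuming the batchsd function above is in scope; the trace here is synthetic, not from the original project:

import numpy as np

trace = np.random.randn(1000)                # hypothetical MCMC trace
print(batchsd(trace, batches=5))             # batch-based standard error
print(np.std(trace) / np.sqrt(len(trace)))   # naive error assuming independence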
Example 3: aabut

import numpy as N

def aabut(source, *args):
    """
    Like the |Stat abut command. It concatenates two arrays column-wise
    and returns the result. CAUTION: If one array is shorter, it will be
    repeated until it is as long as the other.
    Usage:   aabut(source, *args)    where args = any number of arrays
    Returns: an array as long as the LONGEST array passed, source appearing
             on the 'left', arrays in <args> attached on the 'right'.
    """
    if len(source.shape) == 1:
        width = 1
        source = N.resize(source, [source.shape[0], width])
    else:
        width = source.shape[1]
    for addon in args:
        if len(addon.shape) == 1:
            width = 1
            addon = N.resize(addon, [source.shape[0], width])
        else:
            width = source.shape[1]
        if len(addon) < len(source):
            addon = N.resize(addon, [source.shape[0], addon.shape[1]])
        elif len(source) < len(addon):
            source = N.resize(source, [addon.shape[0], source.shape[1]])
        source = N.concatenate((source, addon), 1)
    return source
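The CAUTION in the docstring comes directly from N.resize recycling the shorter array. A quick sketch, assuming aabut is defined as above:

import numpy as N

a = N.array([1, 2, 3, 4])
b = N.array([10, 20])
print(aabut(a, b))
# [[ 1 10]
#  [ 2 20]
#  [ 3 10]   <- b is recycled by N.resize to match the length of a
#  [ 4 20]]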
Example 4: mc_error

import numpy as np

def mc_error(x, batches=5):
    """
    Calculates the simulation standard error, accounting for non-independent
    samples. The trace is divided into batches, and the standard deviation of
    the batch means is calculated.

    :Arguments:
        x : Numpy array
            An array containing MCMC samples
        batches : integer
            Number of batches
    """
    if x.ndim > 1:
        dims = np.shape(x)
        # ttrace = np.transpose(np.reshape(trace, (dims[0], sum(dims[1:]))))
        trace = np.transpose([t.ravel() for t in x])
        return np.reshape([mc_error(t, batches) for t in trace], dims[1:])
    else:
        if batches == 1:
            return np.std(x) / np.sqrt(len(x))
        # If the batches do not divide evenly, trim the excess samples;
        # np.resize would otherwise recycle data to fill the requested shape.
        resid = len(x) % batches
        if resid:
            x = x[:-resid]
        batched_traces = np.resize(x, (batches, len(x) // batches))
        means = np.mean(batched_traces, 1)
        return np.std(means) / np.sqrt(batches)
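mc_error applies the same batching technique as batchsd above, but also handles multidimensional traces by recursing over the flattened trailing dimensions. A hypothetical call, assuming mc_error is in scope:

import numpy as np

x = np.random.randn(1000, 3)   # hypothetical trace of a 3-dimensional parameter
print(mc_error(x).shape)       # (3,) -- one standard error per dimension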
Example 5: draw_lnm_samples

from math import log
import numpy as np

def draw_lnm_samples(**kwargs):
    ''' Draw samples for the uniform-in-log model.

    Parameters
    ----------
    **kwargs: string
        Keyword arguments as model parameters and number of samples

    Returns
    -------
    array
        The first mass
    array
        The second mass
    '''
    # PDF doesn't match the sampler
    nsamples = kwargs.get('nsamples', 1)
    min_mass = kwargs.get('min_mass', 5.)
    max_mass = kwargs.get('max_mass', 95.)
    max_mtotal = min_mass + max_mass
    lnmmin = log(min_mass)
    lnmmax = log(max_mass)
    # Oversample, then keep only pairs below the total-mass cut
    k = nsamples * int(1.5 + log(1 + 100./nsamples))
    aa = np.exp(np.random.uniform(lnmmin, lnmmax, k))
    bb = np.exp(np.random.uniform(lnmmin, lnmmax, k))
    idx = np.where(aa + bb < max_mtotal)
    m1, m2 = (np.maximum(aa, bb))[idx], (np.minimum(aa, bb))[idx]
    return np.resize(m1, nsamples), np.resize(m2, nsamples)
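The final np.resize calls force the output to exactly nsamples entries: the sampler deliberately oversamples (k > nsamples), so resize usually truncates, but if too few pairs survive the total-mass cut it recycles accepted samples instead. A self-contained illustration of that behavior:

import numpy as np

accepted = np.array([12.3, 7.7, 33.1])  # hypothetical accepted masses
print(np.resize(accepted, 2))           # [12.3  7.7] -- truncated
print(np.resize(accepted, 5))           # [12.3  7.7 33.1 12.3  7.7] -- recycled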
Example 6: test

import time
import numpy

def test(npoints):
    # gauss and LeastSquaresFit are defined elsewhere in the source module
    xx = numpy.arange(npoints)
    xx = numpy.resize(xx, (npoints, 1))
    # yy = 1000.0 * exp(-0.5 * (xx * xx) / 15) + 2.0 * xx + 10.5
    yy = gauss([10.5, 2, 1000.0, 20., 15], xx)
    yy = numpy.resize(yy, (npoints, 1))
    sy = numpy.sqrt(abs(yy))
    sy = numpy.resize(sy, (npoints, 1))
    data = numpy.concatenate((xx, yy, sy), 1)
    parameters = [0.0, 1.0, 900.0, 25., 10]
    stime = time.time()
    if 0:
        # old-fashioned call
        fittedpar, chisq, sigmapar = LeastSquaresFit(gauss, parameters, data)
    else:
        # easier to handle
        fittedpar, chisq, sigmapar = LeastSquaresFit(gauss, parameters,
                                                     xdata=xx.reshape((-1,)),
                                                     ydata=yy.reshape((-1,)),
                                                     sigmadata=sy.reshape((-1,)))
    etime = time.time()
    print("Took ", etime - stime, "seconds")
    print("chi square = ", chisq)
    print("Fitted pars = ", fittedpar)
    print("Sigma pars = ", sigmapar)
Example 7: __init__

def __init__(self, name, geometry, order, init_context=True):
    LagrangeSimplexPolySpace.__init__(self, name, geometry, order,
                                      init_context=False)
    nodes, nts, node_coors = self.nodes, self.nts, self.node_coors

    shape = [nts.shape[0] + 1, 2]
    nts = nm.resize(nts, shape)
    nts[-1, :] = [3, 0]

    shape = [nodes.shape[0] + 1, nodes.shape[1]]
    nodes = nm.resize(nodes, shape)
    # Make a 'hypercubic' (cubic in 2D) node.
    nodes[-1, :] = 1

    n_v = self.geometry.n_vertex
    tmp = nm.ones((n_v,), nm.int32)
    node_coors = nm.vstack((node_coors,
                            nm.dot(tmp, self.geometry.coors) / n_v))

    self.nodes, self.nts = nodes, nts
    self.node_coors = nm.ascontiguousarray(node_coors)

    self.bnode = nodes[-1:, :]
    self.n_nod = self.nodes.shape[0]

    if init_context:
        self.eval_ctx = self.create_context(None, 0, 1e-15, 100, 1e-8,
                                            tdim=n_v - 1)
    else:
        self.eval_ctx = None
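nm.resize is used here to grow each array by one row; the new row is filled by repeating old data and is immediately overwritten. A self-contained sketch of that pattern (the node values are hypothetical):

import numpy as nm

nodes = nm.array([[0, 0], [1, 0], [0, 1]])   # hypothetical simplex nodes
nodes = nm.resize(nodes, (nodes.shape[0] + 1, nodes.shape[1]))
nodes[-1, :] = 1                             # overwrite the appended row
print(nodes[-1])                             # [1 1]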
Example 8: test_QuaternionClass

def test_QuaternionClass(self):
    v1 = np.array([0.2, 0.2, 0.4])
    v2 = np.array([1, 0, 0])
    q1 = Quaternion.q_exp(v1)
    q2 = Quaternion.q_exp(v2)
    v = np.array([1, 2, 3])
    # Testing Mult and rotate
    np.testing.assert_almost_equal(Quaternion.q_rotate(Quaternion.q_mult(q1, q2), v),
                                   Quaternion.q_rotate(q1, Quaternion.q_rotate(q2, v)), decimal=7)
    np.testing.assert_almost_equal(Quaternion.q_rotate(q1, v2),
                                   np.resize(Quaternion.q_toRotMat(q1), (3, 3)).dot(v2), decimal=7)
    # Testing Boxplus, Boxminus, Log and Exp
    np.testing.assert_almost_equal(Quaternion.q_boxPlus(q1, Quaternion.q_boxMinus(q2, q1)), q2, decimal=7)
    np.testing.assert_almost_equal(Quaternion.q_log(q1), v1, decimal=7)
    # Testing Lmat and Rmat
    np.testing.assert_almost_equal(Quaternion.q_mult(q1, q2), Quaternion.q_Lmat(q1).dot(q2), decimal=7)
    np.testing.assert_almost_equal(Quaternion.q_mult(q1, q2), Quaternion.q_Rmat(q2).dot(q1), decimal=7)
    # Testing ypr and quat
    roll = 0.2
    pitch = -0.5
    yaw = 2.5
    q_test = Quaternion.q_mult(np.array([np.cos(0.5*pitch), 0, np.sin(0.5*pitch), 0]),
                               np.array([np.cos(0.5*yaw), 0, 0, np.sin(0.5*yaw)]))
    q_test = Quaternion.q_mult(np.array([np.cos(0.5*roll), np.sin(0.5*roll), 0, 0]), q_test)
    np.testing.assert_almost_equal(Quaternion.q_toYpr(q_test), np.array([roll, pitch, yaw]), decimal=7)
    # Testing Jacobian of Ypr
    for i in np.arange(0, 3):
        dv1 = np.array([0.0, 0.0, 0.0])
        dv1[i] = 1.0
        epsilon = 1e-6
        ypr1 = Quaternion.q_toYpr(q1)
        ypr1_dist = Quaternion.q_toYpr(Quaternion.q_boxPlus(q1, dv1*epsilon))
        dypr1_1 = (ypr1_dist - ypr1) / epsilon
        J = np.resize(Quaternion.q_toYprJac(q1), (3, 3))
        dypr1_2 = J.dot(dv1)
        np.testing.assert_almost_equal(dypr1_1, dypr1_2, decimal=5)
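In this test np.resize serves only to coerce the flat output of q_toRotMat / q_toYprJac into a 3x3 matrix; since the element count already matches, it behaves like reshape. Illustrated with stand-in data:

import numpy as np

flat = np.arange(9.0)                     # stand-in for a row-major 3x3 matrix
M = np.resize(flat, (3, 3))               # equivalent to flat.reshape(3, 3) here
print(M.dot(np.array([1.0, 0.0, 0.0])))   # [0. 3. 6.] -- first column of M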
Example 9: explicitmidpoint

def explicitmidpoint(ode, vardict, soln, h, relerr):
    """
    Implementation of the Explicit Midpoint method.
    """
    eqnum = len(ode)
    dim = [eqnum, 2]
    dim.extend(soln[0][0].shape)
    dim = tuple(dim)
    if numpy.iscomplexobj(soln[0]):
        aux = numpy.resize([0. + 0j], dim)
    else:
        aux = numpy.resize([0.], dim)
    dim = soln[0][0].shape
    for vari in range(eqnum):
        vardict.update({'y_{}'.format(vari): soln[vari][-1]})
    for vari in range(eqnum):
        aux[vari][0] = numpy.resize([seval(ode[vari], **vardict) * h[0] + soln[vari][-1]], dim)
    for vari in range(eqnum):
        vardict.update({"y_{}".format(vari): aux[vari][0]})
    vardict.update({'t': vardict['t'] + 0.5 * h[0]})
    for vari in range(eqnum):
        aux[vari][0] = numpy.resize([seval(ode[vari], **vardict)], dim)
    for vari in range(eqnum):
        vardict.update({"y_{}".format(vari): numpy.array(soln[vari][-1] + h[0] * aux[vari][0])})
        pt = soln[vari]
        kt = numpy.array([vardict['y_{}'.format(vari)]])
        soln[vari] = numpy.concatenate((pt, kt))
    vardict.update({'t': vardict['t'] + 0.5 * h[0]})
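numpy.resize([0.], dim) here is just a way to preallocate a zero-filled work array of arbitrary shape, picking a complex dtype when the solution is complex; resizing a single zero repeats it everywhere. An equivalent sketch:

import numpy

dim = (2, 2, 3)                      # hypothetical (equation, stage, state) shape
aux = numpy.resize([0. + 0j], dim)   # same values as numpy.zeros(dim, complex)
print(aux.shape, aux.dtype)          # (2, 2, 3) complex128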
Example 10: append

def append(self, value):
    # convert the appended object to an array if it starts as something else
    if type(value) is not np.ndarray:
        value = np.array(value)
    # add the data
    if value.ndim == 1:
        # adding a single row of data
        n = self.__n
        if n + 1 > self.__N:
            # need to allocate more memory
            self.__N += self.__n_grow
            self.__data = np.resize(self.__data, (self.__N, self.__cols))
        self.__data[n] = value
        self.__n = n + 1
    elif value.ndim == 2:
        # adding multiple rows of data; avoid loops for appending large arrays
        n = self.__n
        L = value.shape[0]
        N_needed = n + L - self.__N
        if N_needed > 0:
            # need to allocate more memory (integer division // keeps the
            # grow step a whole multiple of __n_grow under Python 3)
            self.__N += (N_needed // self.__n_grow + 1) * self.__n_grow
            self.__data = np.resize(self.__data, (self.__N, self.__cols))
        self.__data[n:n+L] = value
        self.__n += L
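np.resize works as the buffer-growing primitive here: it returns an enlarged copy, and any rows beyond the old data hold recycled values until they are overwritten, which append() always does before advancing __n. A standalone sketch of the growth step:

import numpy as np

data = np.array([[1., 2., 3.], [4., 5., 6.]])
data = np.resize(data, (4, 3))   # grow to 4 rows; new rows repeat old data
data[2] = [7., 8., 9.]           # overwrite before use, as append() does
print(data[2], data[3])          # [7. 8. 9.] [4. 5. 6.] -- row 3 still recycled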
Example 11: initialize

def initialize(dx, x_shore, eta_shore, eta_toe, S_d, S_b, S, Q_w, B0):
    """Initialize the variables for the simulation.

    Args:

    Returns:
        eta_b : array of z-coordinate of the basement at every node. Has the
            same size as dx. [ L ]

    Comments:

    """
    # First thing we should do is specify our computational domain.
    N, dx_shore = nodes_in_domain(x_shore, dx)
    N_old = N.copy()
    dx = init_domain(N, dx_shore, dx)
    x_shore = x(dx)[-1]
    # Then we compute the basement elevation and the location of the delta toe.
    eta_b = np.resize(eta_toe, N)
    eta_b, x_toe = init_basement(dx, eta_shore, eta_toe, eta_b, S_d, S_b)
    # Next, we compute the initial fluvial profile.
    eta, S = init_flumen(dx, eta_shore, S)
    # Instantiate a water-depth array for the domain.
    H = np.zeros_like(dx)
    # Compute the unit flow rate.
    qw = unit_flowrate(Q_w, B0)  # [ L**2 / T ]
    # Compute the critical depth for the section.
    Hc = critical_flow(qw)
    # Redefine B0 as a vector, to have that information on every node.
    B0 = np.resize(B0, N)
    # Instantiate a sediment transport capacity array for the domain.
    qt = np.zeros_like(dx)
    return (N, N_old, dx_shore, dx, x_shore, eta_b, x_toe, eta, S, H, Hc, qw,
            B0, qt)
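np.resize(eta_toe, N) and np.resize(B0, N) broadcast scalar inputs into per-node arrays; this works because resize repeats the (single-element) input until the requested size is filled:

import numpy as np

eta_toe = -2.5                 # hypothetical toe elevation [ L ]
print(np.resize(eta_toe, 4))   # [-2.5 -2.5 -2.5 -2.5] -- one value per node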
Example 12: _wrap

def _wrap(vector, pad_tuple, iaxis, kwargs):
    '''
    Private function to calculate the before/after vectors for pad_wrap.

    Parameters
    ----------
    vector : ndarray
        Input vector that already includes empty padded values.
    pad_tuple : tuple
        This tuple represents the (before, after) width of the padding
        along this particular iaxis.
    iaxis : int
        The axis currently being looped across. Not used in _wrap.
    kwargs : keyword arguments
        Keyword arguments. Not used in _wrap.

    Returns
    -------
    _wrap : ndarray
        Padded vector
    '''
    if pad_tuple[1] == 0:
        after_vector = vector[pad_tuple[0]:None]
    else:
        after_vector = vector[pad_tuple[0]:-pad_tuple[1]]
    before_vector = np.resize(after_vector[::-1], pad_tuple[0])[::-1]
    after_vector = np.resize(after_vector, pad_tuple[1])
    return _create_vector(vector, pad_tuple, before_vector, after_vector)
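This is the core of numpy's 'wrap' padding mode: np.resize recycles the source vector to the requested pad width, and the reversal trick makes the before-pad end with the last values of the data. A self-contained trace through the two calls:

import numpy as np

after_vector = np.array([1, 2, 3, 4, 5])
pad_tuple = (7, 3)
before = np.resize(after_vector[::-1], pad_tuple[0])[::-1]
after = np.resize(after_vector, pad_tuple[1])
print(before)  # [4 5 1 2 3 4 5] -- ends with the tail of the data
print(after)   # [1 2 3]         -- starts with the head of the data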
Example 13: cube

def cube():
    """
    Build vertices for a colored cube.

    V  is the vertices
    I1 is the indices for a filled cube (use with GL_TRIANGLES)
    I2 is the indices for an outlined cube (use with GL_LINES)
    """
    vtype = [('a_position', np.float32, 3),
             ('a_normal',   np.float32, 3),
             ('a_color',    np.float32, 4)]
    # Vertex positions
    v = [[ 1,  1,  1], [-1,  1,  1], [-1, -1,  1], [ 1, -1,  1],
         [ 1, -1, -1], [ 1,  1, -1], [-1,  1, -1], [-1, -1, -1]]
    # Face normals
    n = [[ 0,  0,  1], [ 1,  0,  0], [ 0,  1,  0],
         [-1,  0,  1], [ 0, -1,  0], [ 0,  0, -1]]
    # Vertex colors
    c = [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 1, 0, 1],
         [1, 1, 0, 1], [1, 1, 1, 1], [1, 0, 1, 1], [1, 0, 0, 1]]
    V = np.array([(v[0], n[0], c[0]), (v[1], n[0], c[1]), (v[2], n[0], c[2]), (v[3], n[0], c[3]),
                  (v[0], n[1], c[0]), (v[3], n[1], c[3]), (v[4], n[1], c[4]), (v[5], n[1], c[5]),
                  (v[0], n[2], c[0]), (v[5], n[2], c[5]), (v[6], n[2], c[6]), (v[1], n[2], c[1]),
                  (v[1], n[3], c[1]), (v[6], n[3], c[6]), (v[7], n[3], c[7]), (v[2], n[3], c[2]),
                  (v[7], n[4], c[7]), (v[4], n[4], c[4]), (v[3], n[4], c[3]), (v[2], n[4], c[2]),
                  (v[4], n[5], c[4]), (v[7], n[5], c[7]), (v[6], n[5], c[6]), (v[5], n[5], c[5])],
                 dtype=vtype)
    # Tile the per-face index pattern across the 6 faces, then offset each
    # face's indices by its 4 vertices.
    I1 = np.resize(np.array([0, 1, 2, 0, 2, 3], dtype=np.uint32), 6 * (2 * 3))
    I1 += np.repeat(4 * np.arange(2 * 3), 6)
    I2 = np.resize(np.array([0, 1, 1, 2, 2, 3, 3, 0], dtype=np.uint32), 6 * (2 * 4))
    I2 += np.repeat(4 * np.arange(6), 8)
    return V, I1, I2
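The two np.resize calls tile a per-face index pattern across all six faces; np.repeat then shifts each face's indices by its four vertices. The trick in isolation:

import numpy as np

quad = np.array([0, 1, 2, 0, 2, 3], dtype=np.uint32)   # two triangles of one face
I1 = np.resize(quad, 6 * 6)                            # repeat the pattern for 6 faces
I1 += np.repeat(4 * np.arange(6, dtype=np.uint32), 6)  # offset per face
print(I1[:12])  # [0 1 2 0 2 3 4 5 6 4 6 7]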
Example 14: plot_image

def plot_image(self, image, nb_repeat=40, show_plot=True):
    """Plot augmented variations of an image.

    This method takes an image and plots it by default in 40 differently
    augmented versions.

    This method is intended to visualize the strength of your chosen
    augmentations (so for debugging).

    Args:
        image: The image to plot.
        nb_repeat: How often to plot the image. Each time it is plotted,
            the chosen augmentation will be different. (Default: 40).
        show_plot: Whether to show the plot. False makes sense if you
            don't have a graphical user interface on the machine.
            (Default: True)

    Returns:
        The figure of the plot.
        Use figure.savefig() to save the image.
    """
    if len(image.shape) == 2:
        images = np.resize(image, (nb_repeat, image.shape[0], image.shape[1]))
    else:
        images = np.resize(image, (nb_repeat, image.shape[0], image.shape[1],
                                   image.shape[2]))
    return self.plot_images(images, True, show_plot=show_plot)
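Because nb_repeat copies of the same image are requested, np.resize stacks the input end-to-end, so every slice of images equals the original; the augmentation then varies each copy downstream. A quick check of the stacking step:

import numpy as np

image = np.arange(6).reshape(2, 3)     # hypothetical grayscale image
images = np.resize(image, (4, 2, 3))   # 4 identical stacked copies
print((images == image).all())         # True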
Example 15: _assignInitialPoints

def _assignInitialPoints(cvmat, S):
    h, w, c = cvmat.shape

    # Compute the max grid assignment (integer division for Python 3)
    nx = w // S
    ny = h // S

    # Compute the superpixel x, y grid
    xgrid = np.arange(nx).reshape(1, nx) * np.ones(ny, dtype=int).reshape(ny, 1)
    ygrid = np.arange(ny).reshape(ny, 1) * np.ones(nx, dtype=int).reshape(1, nx)

    # Compute an x, y lookup to a label lookup
    label_map = nx * ygrid + xgrid

    # Compute the x groups in pixel space
    tmp = np.arange(nx)
    tmp = np.resize(tmp, (w,))
    xgroups = tmp[tmp.argsort()]

    # Compute the y groups in pixel space
    tmp = np.arange(ny)
    tmp = np.resize(tmp, (h,))
    ygroups = tmp[tmp.argsort()]

    # (np.int is deprecated in recent NumPy; the builtin int is equivalent here)
    labels = np.zeros((h, w), dtype=int)
    for x in range(w):
        for y in range(h):
            labels[y, x] = label_map[ygroups[y], xgroups[x]]

    return label_map, xgroups, ygroups, labels
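np.resize followed by sorting turns a short label range into contiguous pixel groups: resize recycles the labels across the full width, and indexing by argsort (equivalent to np.sort here) gathers them into runs. In isolation:

import numpy as np

nx, w = 3, 8
tmp = np.resize(np.arange(nx), (w,))   # [0 1 2 0 1 2 0 1] -- labels recycled
xgroups = tmp[tmp.argsort()]           # [0 0 0 1 1 1 2 2] -- grouped into runs
print(xgroups)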