This article collects typical usage examples of the numpy.empty_like function in Python. If you are wondering what empty_like does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
A total of 15 code examples of the empty_like function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
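Before the project examples, here is a minimal, self-contained sketch of what np.empty_like does: it allocates a new array with the same shape and dtype as the template array but leaves the contents uninitialized, so every element must be written before it is read. The array names and values below are purely illustrative.

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
out = np.empty_like(a)       # same shape (2, 2) and dtype float32, contents are arbitrary
out[:] = 2.0 * a             # fill every element before using the array
print(out.shape, out.dtype)  # (2, 2) float32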
Example 1: __init__
def __init__(self, in_size, out_size, encode_size,
             Wscale=1.0, Vscale=1.0, Uscale=1.0,
             nobias=False, bias=0.0, forget_bias=1.0):
    self.bias = np.float32(bias)
    self.nobias = nobias
    self.in_size = in_size
    self.out_size = out_size
    self.encode_size = encode_size
    self.forget_bias = np.float32(forget_bias)
    # initialize weight matrices
    self.W = cpu.utils.weight_initialization(in_size, out_size*4, Wscale)
    self.gW = np.empty_like(self.W)
    self.V = cpu.utils.weight_initialization(out_size, out_size*4, Vscale)
    self.gV = np.empty_like(self.V)
    self.U = cpu.utils.weight_initialization(encode_size, out_size*4, Uscale)
    self.gU = np.empty_like(self.U)
    if not self.nobias:
        self.b = np.empty((1, out_size*4), dtype=np.float32)
        self.b.fill(self.bias)
        self.b[0, out_size:out_size*2] = self.forget_bias
        self.gb = np.empty_like(self.b)
    self.z = None
Example 2: hsv_to_rgb
def hsv_to_rgb(hsv):
    """
    Convert HSV values in a numpy array to RGB values.
    Both input and output arrays have shape (M, N, 3).
    """
    h = hsv[:, :, 0]; s = hsv[:, :, 1]; v = hsv[:, :, 2]
    r = np.empty_like(h); g = np.empty_like(h); b = np.empty_like(h)
    i = (h*6.0).astype(int)
    f = (h*6.0) - i
    p = v*(1.0 - s)
    q = v*(1.0 - s*f)
    t = v*(1.0 - s*(1.0 - f))
    idx = i % 6 == 0
    r[idx] = v[idx]; g[idx] = t[idx]; b[idx] = p[idx]
    idx = i == 1
    r[idx] = q[idx]; g[idx] = v[idx]; b[idx] = p[idx]
    idx = i == 2
    r[idx] = p[idx]; g[idx] = v[idx]; b[idx] = t[idx]
    idx = i == 3
    r[idx] = p[idx]; g[idx] = q[idx]; b[idx] = v[idx]
    idx = i == 4
    r[idx] = t[idx]; g[idx] = p[idx]; b[idx] = v[idx]
    idx = i == 5
    r[idx] = v[idx]; g[idx] = p[idx]; b[idx] = q[idx]
    idx = s == 0
    r[idx] = v[idx]; g[idx] = v[idx]; b[idx] = v[idx]
    rgb = np.empty_like(hsv)
    rgb[:, :, 0] = r; rgb[:, :, 1] = g; rgb[:, :, 2] = b
    return rgb
Example 3: search_for_channel
def search_for_channel(source_area, routys, routxs, search=2, tol=10):
    """Search neighboring grid cells for channel"""
    log.debug('searching for channel')
    new_ys = np.empty_like(routys)
    new_xs = np.empty_like(routxs)
    for i, (y, x) in enumerate(zip(routys, routxs)):
        area0 = source_area[y, x]
        search_area = source_area[y-search:y+search+1, x-search:x+search+1]
        if np.any(search_area > area0*tol):
            sy, sx = np.unravel_index(search_area.argmax(), search_area.shape)
            new_ys[i] = y + sy - search
            new_xs[i] = x + sx - search
            log.debug('Moving pour point to channel y: '
                      '{0}->{1}, x: {2}->{3}'.format(y, new_ys[i],
                                                     x, new_xs[i]))
            log.debug('Source Area has increased from {0}'
                      ' to {1}'.format(area0, source_area[new_ys[i], new_xs[i]]))
        else:
            new_ys[i] = y
            new_xs[i] = x
    return new_ys, new_xs
Example 4: mso_r_lat_lon_position
def mso_r_lat_lon_position(time, mso=False, sza=False, **kwargs):
    """Returns position in MSO spherical polar coordinates.
    With `mso` set, return [r/lat/lon], [mso x/y/z [km]].
    With `sza` set, return [r/lat/lon], [sza [deg]].
    With both, return [r/lat/lon], [mso x/y/z [km]], [sza [deg]]."""
    if sza:
        pos = position(time, frame='MAVEN_MSO', **kwargs)
        sza = np.rad2deg(np.arctan2(np.sqrt(pos[1]**2 + pos[2]**2), pos[0]))
        if isinstance(sza, np.ndarray):
            inx = sza < 0.
            if np.any(inx):
                sza[inx] = 180. + sza[inx]
        elif sza < 0.0:
            sza = 180. + sza
        tmp = reclat(pos)
        tmp_out = np.empty_like(tmp)
        tmp_out[0] = tmp[0]
        tmp_out[1] = np.rad2deg(tmp[2])
        tmp_out[2] = np.rad2deg(tmp[1])
        if mso:
            return tmp_out, pos, sza
        return tmp_out, sza
    else:
        pos = position(time, frame='MAVEN_MSO', **kwargs)
        tmp = reclat(pos)
        tmp_out = np.empty_like(tmp)
        tmp_out[0] = tmp[0]
        tmp_out[1] = np.rad2deg(tmp[2])
        tmp_out[2] = np.rad2deg(tmp[1])
        if mso:
            return tmp_out, pos
        return tmp_out
Example 5: getDataStructures
def getDataStructures(self):
    """Initializes and returns data structures with proper shapes for:
    - activation of the layer (accessible later as ass[l])
    - dError of the layer (accessible later as dErrors[l])"""
    dError = np.empty_like(self.b)
    a = np.empty_like(self.b)
    return a, dError
Example 6: ibarrier
def ibarrier(timeout=None, root=0, tag=123, comm=world):
    """Non-blocking barrier returning a list of requests to wait for.
    An optional time-out may be given, turning the call into a blocking
    barrier with an upper time limit, beyond which an exception is raised."""
    requests = []
    byte = np.ones(1, dtype=np.int8)
    if comm.rank == root:
        for rank in list(range(root)) + list(range(root + 1, comm.size)):  # everybody else
            rbuf, sbuf = np.empty_like(byte), byte.copy()
            requests.append(comm.send(sbuf, rank, tag=2 * tag + 0,
                                      block=False))
            requests.append(comm.receive(rbuf, rank, tag=2 * tag + 1,
                                         block=False))
    else:
        rbuf, sbuf = np.empty_like(byte), byte
        requests.append(comm.receive(rbuf, root, tag=2 * tag + 0, block=False))
        requests.append(comm.send(sbuf, root, tag=2 * tag + 1, block=False))
    if comm.size == 1 or timeout is None:
        return requests
    t0 = time.time()
    while not comm.testall(requests):  # automatic clean-up upon success
        if time.time() - t0 > timeout:
            raise RuntimeError('MPI barrier timeout.')
    return []
Example 7: psup_O
def psup_O(exits, P, R, O_shape, P_heatmap=None, alpha=1.0e-10):
    OT = np.zeros(O_shape, P.dtype)
    # Calculate denominator
    # ---------------------
    # but only do this if it hasn't been done already
    # (we must set P_heatmap = None when the probe/coords has changed)
    if P_heatmap is None:
        P_heatmapT = era.make_P_heatmap(P, R, O_shape)
        P_heatmap = np.empty_like(P_heatmapT)
        comm.Allreduce([P_heatmapT, MPI.__TypeDict__[P_heatmapT.dtype.char]],
                       [P_heatmap, MPI.__TypeDict__[P_heatmap.dtype.char]],
                       op=MPI.SUM)
    # Calculate numerator
    # -------------------
    for r, exit in zip(R, exits):
        OT[-r[0]:P.shape[0]-r[0], -r[1]:P.shape[1]-r[1]] += exit * P.conj()
    # divide
    # here we need to do an all reduce
    # --------------------------------
    O = np.empty_like(OT)
    comm.Allreduce([OT, MPI.__TypeDict__[OT.dtype.char]],
                   [O, MPI.__TypeDict__[O.dtype.char]],
                   op=MPI.SUM)
    O = O / (P_heatmap + alpha)
    return O, P_heatmap
Example 8: load
def load(self, path2file, dtype='float32', iter_count=None, thresholds=None):
    fmt = os.path.splitext(path2file)[-1].lstrip('.')
    print('Loading segmentation from file %s ...' % path2file)
    if fmt == 'npz':
        a = np.load(path2file)
        self.thresholds = a['thresholds']
        self.iter_count = a['iter_count']
        self.sdf = a['sdf']
    elif fmt == 'mat':
        a = sio.loadmat(path2file)
        self.thresholds = a['thresholds']
        self.iter_count = a['iter_count']
        self.sdf = a['sdf']
    elif fmt == 'bin':
        self.thresholds = thresholds
        self.sdf = np.fromfile(path2file, dtype=dtype).reshape(thresholds.shape + self.im.shape)
        self.iter_count = iter_count
    else:
        raise KeyError('File format not understood!')
    # Reinitialize variables
    self.nthresh = np.ndim(self.thresholds)
    self.im_ave = np.empty_like(self.im, dtype=self.dtype)
    self.im_error = np.empty_like(self.im, dtype=self.dtype)
    print('Calculating means and error with the loaded SDF ...')
    update_regions(self.im, self.sdf, self.im_ave, self.im_error)
    print('Done!')
Example 9: initialize
def initialize(self):
    """
    Initialize the segmentation.
    :param thresholds:
    :return:
    """
    # Initialize variables
    self.thresholds = require_array(self.kwargs.get('thresholds'), dtype=self.dtype)
    self.nthresh = np.ndim(self.thresholds)
    self.sdf = np.empty((self.nthresh, ) + self.im.shape, dtype=self.dtype)
    self.im_ave = np.empty_like(self.im, dtype=self.dtype)
    self.im_error = np.empty_like(self.im, dtype=self.dtype)
    self.iter_count = 0
    # Initialize regions
    print('Initializing SDF and calculating im_ave & im_error ...')
    init_regions(self.im, self.thresholds, self.im_ave, self.im_error, self.sdf)
    # Reinitialize SDF
    print('Reinitializing SDF ...')
    for i in range(self.nthresh):
        im3D.sdf.inplace.reinit(self.sdf[i], self.sdf[i],
                                dt=self.kwargs['init_reinit_dt'],
                                tol=self.kwargs['init_reinit_tol'],
                                band=self.kwargs['init_reinit_band'],
                                max_it=self.kwargs['init_reinit_max_it'],
                                subcell=self.kwargs['init_reinit_subcell'],
                                WENO=self.kwargs['init_reinit_weno'],
                                verbose=True)
Example 10: get_harmonic_power1
def get_harmonic_power1(self, interval_range=(15., 50.), harmonics=None, sample_interval=3):
    """
    Return a 1D array of the strength of harmonic peaks for each time
    in spectrum.times.
    interval_range (2-tuple) .. harmonic interval range to search for best match
    harmonics (None, int or iterable of ints) .. the harmonics to match. ex: [2,3] will ignore
        the influence of the first harmonic. ex: 3 will try to match [1,2,3]. None will
        use the default.
    """
    print('hpower1')
    if harmonics is None: harmonics = self.harmonics
    sample_times = self.spectrum.times[::sample_interval]
    fpnt_shape = len(harmonics), sample_times.shape[0]
    fpnt = np.empty(fpnt_shape)
    pwrs = np.empty_like(sample_times)
    ints = np.empty_like(sample_times)
    for i, t in enumerate(sample_times):
        res = self.get_peaks(t, harmonics=harmonics, interval_range=interval_range)
        fpnt[:, i] = res[1]
        pwrs[i] = res[2]
        ints[i] = res[3]
    tlen = self.spectrum.times.shape[0]
    self.fingerprint = np.zeros((self.harmonics.shape[0], tlen))
    for i in range(len(harmonics)):
        self.fingerprint[i, :] = fast_resample(fpnt[i, :], tlen)
    self.harmonic_power = fast_resample(pwrs, tlen)
    self.harmonic_intvl = fast_resample(ints, tlen)
    return self.harmonic_power, self.harmonic_intvl
Example 11: average_passive_aggressive
def average_passive_aggressive(feature_matrix, labels, T, L):
    theta = np.empty_like(feature_matrix[0])
    theta.fill(0.)
    theta_empty = np.empty_like(feature_matrix[0])
    theta_empty.fill(0.)
    theta_sum = theta
    theta_0 = 0.0
    theta_0_sum = theta_0
    ticker = 0
    update_track = 0
    while ticker < T:
        for i in range(len(feature_matrix)):
            (theta_new, theta_0_new) = passive_aggressive_single_step_update(feature_matrix[i], labels[i], L, theta, theta_0)
            # select the instances where theta actually gets updated
            if np.any(np.subtract(theta_new, theta)) or theta_0_new - theta_0 != 0:
                theta_sum = np.add(theta_new, theta_sum)
                theta_0_sum += theta_0_new
                update_track += 1
            theta = theta_new
            theta_0 = theta_0_new
        ticker += 1
    theta_average = np.divide(theta_sum, update_track)
    theta_0_average = theta_0_sum / update_track
    return (theta_average, theta_0_average)
Example 12: test_prepared_invocation
def test_prepared_invocation(self):
    a = np.random.randn(4, 4).astype(np.float32)
    a_gpu = drv.mem_alloc(a.size * a.dtype.itemsize)
    drv.memcpy_htod(a_gpu, a)
    mod = SourceModule("""
        __global__ void doublify(float *a)
        {
          int idx = threadIdx.x + threadIdx.y*blockDim.x;
          a[idx] *= 2;
        }
        """)
    func = mod.get_function("doublify")
    func.prepare("P")
    func.prepared_call((1, 1), (4, 4, 1), a_gpu, shared_size=20)
    a_doubled = np.empty_like(a)
    drv.memcpy_dtoh(a_doubled, a_gpu)
    print(a)
    print(a_doubled)
    assert la.norm(a_doubled - 2*a) == 0
    # now with offsets
    func.prepare("P")
    a_quadrupled = np.empty_like(a)
    func.prepared_call((1, 1), (15, 1, 1), int(a_gpu) + a.dtype.itemsize)
    drv.memcpy_dtoh(a_quadrupled, a_gpu)
    assert la.norm(a_quadrupled[1:] - 4*a[1:]) == 0
Example 13: feature_meanstd
def feature_meanstd(mat):
    """
    Utility function that computes the per-dimension mean and standard
    deviation of features across all MPI nodes (the input matrix is
    restored before returning).
    Input:
        mat: the local data matrix; each row is a feature vector and each
            column is a feature dimension
    Output:
        m: the mean for each dimension
        std: the standard deviation for each dimension
    """
    # subtract mean
    N = mpi.COMM.allreduce(mat.shape[0])
    m = np.empty_like(mat[0])
    mpi.COMM.Allreduce(np.sum(mat, axis=0), m)
    m /= N
    # we perform in-place modifications
    mat -= m
    # normalize variance
    std = np.empty_like(mat[0])
    mpi.COMM.Allreduce(np.sum(mat ** 2, axis=0), std)
    std /= N
    # we also add a regularization term
    std = np.sqrt(std) + np.finfo(np.float64).eps
    # recover the original mat
    mat += m
    return m, std
Example 14: get_data_frames
def get_data_frames(llh_file):
    """
    Loads data from a stored hdf5 file into a data frame for each
    combination of 'pseudo_data | hypo'.
    """
    fh = h5py.File(llh_file, "r")
    data_frames = []
    for dFlag in ["data_NMH", "data_IMH"]:
        for hFlag in ["hypo_NMH", "hypo_IMH"]:
            keys = list(fh["trials"][dFlag][hFlag].keys())
            entries = len(fh["trials"][dFlag][hFlag][keys[0]])
            data = {key: np.array(fh["trials"][dFlag][hFlag][key]) for key in keys}
            data["pseudo_data"] = np.empty_like(data[keys[0]], dtype="|S16")
            data["pseudo_data"][:] = dFlag
            data["hypo"] = np.empty_like(data[keys[0]], dtype="|S16")
            data["hypo"][:] = hFlag
            df = DataFrame(data)
            data_frames.append(df)
    fh.close()
    return data_frames
Example 15: test_structuring_element8
def test_structuring_element8():
    # check the output for a custom structuring element
    r = np.array([[0, 0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0, 0],
                  [0, 0, 255, 0, 0, 0],
                  [0, 0, 255, 255, 255, 0],
                  [0, 0, 0, 255, 255, 0],
                  [0, 0, 0, 0, 0, 0]])
    # 8-bit
    image = np.zeros((6, 6), dtype=np.uint8)
    image[2, 2] = 255
    elem = np.asarray([[1, 1, 0], [1, 1, 1], [0, 0, 1]], dtype=np.uint8)
    out = np.empty_like(image)
    mask = np.ones(image.shape, dtype=np.uint8)
    rank.maximum(image=image, selem=elem, out=out, mask=mask,
                 shift_x=1, shift_y=1)
    assert_equal(r, out)
    # 16-bit
    image = np.zeros((6, 6), dtype=np.uint16)
    image[2, 2] = 255
    out = np.empty_like(image)
    rank.maximum(image=image, selem=elem, out=out, mask=mask,
                 shift_x=1, shift_y=1)
    assert_equal(r, out)