This article collects typical usage examples of the numpy.product function in Python. If you have been wondering how to call product, what its arguments mean, or what it looks like in real code, the curated examples here may help.
The following presents 15 code examples of the product function, drawn from open-source projects and ordered by popularity.
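Before diving in, note that numpy.product is a long-standing alias of numpy.prod; NumPy deprecated the alias in 1.25 and removed it in 2.0, so on current NumPy the calls below translate directly to np.prod. A minimal sketch of the semantics the examples rely on:

import numpy as np

a = np.arange(1, 7).reshape(2, 3)  # [[1 2 3], [4 5 6]]
print(np.prod(a))                  # 720 -- product over all elements
print(np.prod(a, axis=0))          # [ 4 10 18] -- column-wise product
print(np.prod(a, axis=1))          # [  6 120] -- row-wise product
print(np.prod(a.shape))            # 6 -- common idiom: total element count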
Example 1: points_to_basis_dists
def points_to_basis_dists(self, points):
assert is_mat(points)
assert is_float(points)
(N, D) = points.shape
assert D == self.grid.get_dim()
G = self.grid
# Get indices
cell_coords = G.points_to_cell_coords(points)
# Get rel distances
rel_dist = G.points_to_low_vertex_rel_distance(points, cell_coords)
assert (N, D) == rel_dist.shape
# Get the vertices
vertices = self.grid.cell_coords_to_vertex_indices(cell_coords)
assert (N, 2 ** D) == vertices.shape
# Calculate multilinear interp weights from distances
weights = np.empty((N, 2 ** D))
for (i, bin_vertex) in enumerate(itertools.product([0, 1], repeat=D)):
vert_mask = np.array(bin_vertex, dtype=bool)
weights[:, i] = np.product(rel_dist[:, vert_mask], axis=1) * np.product(
1.0 - rel_dist[:, ~vert_mask], axis=1
)
point_dist = self.convert_to_sparse_matrix(cell_coords, vertices, weights)
return point_dist
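The weight loop above is multilinear interpolation: each of the 2**D vertices of a cell receives the product of the point's relative offsets along the axes where that vertex is the high corner, times one minus the offsets where it is the low corner. A self-contained sketch of just that computation (the grid helpers such as points_to_cell_coords belong to the surrounding class and are not reproduced):

import itertools
import numpy as np

def multilinear_weights(rel_dist):
    """rel_dist: (N, D) offsets in [0, 1] from each cell's low vertex.
    Returns (N, 2**D) weights, one column per cell vertex."""
    N, D = rel_dist.shape
    weights = np.empty((N, 2 ** D))
    for i, corner in enumerate(itertools.product([0, 1], repeat=D)):
        mask = np.array(corner, dtype=bool)
        weights[:, i] = (np.prod(rel_dist[:, mask], axis=1)
                         * np.prod(1.0 - rel_dist[:, ~mask], axis=1))
    return weights

w = multilinear_weights(np.array([[0.25, 0.5]]))
print(w)              # [[0.375 0.375 0.125 0.125]]
print(w.sum(axis=1))  # [1.] -- the weights always sum to one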
Example 2: get_dataset_slice
def get_dataset_slice(self, in_dataset_obj, dataset_info, in_data_idx, out_shape, inp_filename=""):
"""Copys dataset values from one dataset object to another, but only certain indexes along a
specific dimension of the data"""
# Determine how to extact data other than the splice dimension
in_dataset_indexes = dataset_info.input_data_indexes(in_dataset_obj, in_data_idx)
# Obtain selected data for copying into output dataset
try:
if len(in_dataset_indexes) == 1 and not isinstance(in_dataset_indexes[0], slice):
in_data = in_dataset_obj[:][numpy.array(in_dataset_indexes[0])]
else:
in_data = in_dataset_obj[:][tuple(in_dataset_indexes)]
except IOError as exc:
raise IOError("Can not read dataset %s from file %s: %s" % (dataset_info.inp_name, inp_filename, exc))
# Set sliced data into output dataset
if numpy.product(in_data.shape) > numpy.product(out_shape):
self.logger.warning("Dataset %s requires destructive resizing" % (dataset_info.out_name))
self.logger.debug("At indexes %s resizing source data of shape %s to %s." % (in_data_idx, in_data.shape, out_shape))
stored_data = numpy.resize(in_data, out_shape)
else:
stored_data = in_data.reshape(out_shape)
return stored_data
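The warning branch above exists because numpy.reshape demands an exact element-count match, while numpy.resize silently truncates or repeats data to fill the target shape, hence "destructive". A quick demonstration:

import numpy as np

a = np.arange(6)             # [0 1 2 3 4 5]
print(a.reshape(2, 3))       # fine: six elements either way
print(np.resize(a, (2, 2)))  # [[0 1] [2 3]] -- elements 4 and 5 are dropped
print(np.resize(a, (2, 4)))  # [[0 1 2 3] [4 5 0 1]] -- data repeats to fill
# a.reshape(2, 2) would raise ValueError: cannot reshape array of size 6 into shape (2,2)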
Example 3: fftconvolve
def fftconvolve(in1, in2, in3=None, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
copied from scipy, but here used to try out inverse filter
doesn't work or I can't get it to work
"""
s1 = array(in1.shape)
s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
size = s1+s2-1
    # Always use 2**n-sized FFT; cast to int because FFT shapes must be integers
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
IN1 = fftn(in1,fsize)
#IN1 *= fftn(in2,fsize)
IN1 /= fftn(in2,fsize) # use inverse filter
    # note: the inverse here is elementwise, not a matrix inverse
    # is this correct? NO, it doesn't seem to work
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if product(s1,axis=0) > product(s2,axis=0):
osize = s1
else:
osize = s2
return _centered(ret,osize)
elif mode == "valid":
return _centered(ret,abs(s2-s1)+1)
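For what it's worth, the plain spectral division above blows up wherever fftn(in2) is near zero, which is the usual reason a naive inverse filter "doesn't work". A sketch of the standard remedy, Wiener-style regularization; eps is an assumed tuning constant, not part of the original code, and the slicing mirrors the snippet above:

import numpy as np
from numpy.fft import fftn, ifftn

def inverse_filter(observed, kernel, eps=1e-3):
    """Regularized deconvolution sketch: recover a signal blurred by kernel."""
    shape = np.array(observed.shape) + np.array(kernel.shape) - 1
    fshape = (2 ** np.ceil(np.log2(shape))).astype(int)
    H = fftn(kernel, fshape)
    G = fftn(observed, fshape)
    # Wiener-like quotient: |H|^2 + eps keeps the denominator away from zero
    F = G * np.conj(H) / (np.abs(H) ** 2 + eps)
    fslice = tuple(slice(0, int(sz)) for sz in shape)
    return np.real(ifftn(F)[fslice])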
Example 4: maxprod
def maxprod(data, num):
def diagdirection(largest):
        for i in range(-Len+num, Len-num+1):
diag = data.diagonal(i)
size = diag.size
            for j in range(size-num+1):
dp = np.product(diag[j:j+num])
if dp > largest:
largest = dp
return largest
largest = 0
Len = data.shape[0]
    for i in range(Len):
row = data[i,:]
col = data[:,i]
        for j in range(Len-num+1):
rp = np.product(row[j:j+num])
cp = np.product(col[j:j+num])
if rp > largest:
largest = rp
if cp > largest:
largest = cp
largest = diagdirection(largest)
data = data[:,::-1]
largest = diagdirection(largest)
return largest
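A small hand-checkable call of this classic "greatest product in a grid" routine: in the 2x2 grid below with num=2, the rows give products 2 and 12, the columns 3 and 8, and the two diagonal directions 4 and 6, so the answer is 12.

import numpy as np

grid = np.array([[1, 2],
                 [3, 4]])
print(maxprod(grid, 2))  # 12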
Example 5: fftconvolve
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
"""
s1 = array(in1.shape)
s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
size = s1+s2-1
IN1 = fftn(in1,size)
IN1 *= fftn(in2,size)
ret = ifftn(IN1)
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if product(s1,axis=0) > product(s2,axis=0):
osize = s1
else:
osize = s2
return _centered(ret,osize)
elif mode == "valid":
return _centered(ret,abs(s2-s1)+1)
Example 6: weighted_variance
def weighted_variance(image, mask, binary_image):
"""Compute the log-transformed variance of foreground and background
image - intensity image used for thresholding
mask - mask of ignored pixels
binary_image - binary image marking foreground and background
"""
if not np.any(mask):
return 0
#
# Clamp the dynamic range of the foreground
#
minval = np.max(image[mask])/256
if minval == 0:
return 0
fg = np.log2(np.maximum(image[binary_image & mask], minval))
bg = np.log2(np.maximum(image[(~ binary_image) & mask], minval))
nfg = np.product(fg.shape)
nbg = np.product(bg.shape)
if nfg == 0:
return np.var(bg)
elif nbg == 0:
return np.var(fg)
else:
return (np.var(fg) * nfg + np.var(bg)*nbg) / (nfg+nbg)
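A minimal smoke test with synthetic data (the median threshold here is just an illustrative stand-in): mask nothing, split a random image into foreground and background, and print the pooled log-variance.

import numpy as np

rng = np.random.default_rng(0)
image = rng.random((64, 64))
mask = np.ones_like(image, dtype=bool)   # ignore no pixels
binary_image = image > np.median(image)  # crude foreground/background split
print(weighted_variance(image, mask, binary_image))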
Example 7: predict
def predict(self, context):
if self.dictionary is None or self.parameters is None:
print('Train before predict!')
return
context = context[-self.context_size:]
    input_ids = []
    for word in context:
        if word in self.dictionary:
            input_ids.append(self.dictionary[word])
        else:
            input_ids.append(0)  # index 0 stands for unknown words
W_size = np.product(self.W_shape)
U_size = np.product(self.U_shape)
H_size = np.product(self.H_shape)
split_indices = [W_size, W_size + U_size, W_size + U_size + H_size]
W, U, H, C = np.split(self.parameters, split_indices)
W = W.reshape(self.W_shape)
U = U.reshape(self.U_shape)
H = H.reshape(self.H_shape)
C = C.reshape(self.C_shape)
    x = np.concatenate([C[input_ids[i]] for i in range(self.context_size)])
x = np.append(x, 1.) # Append bias term
x = x.reshape(-1, 1)
y = W.dot(x) + U.dot(np.tanh(H.dot(x)))
# You don't want to predict unknown words (index 0)
prediction = np.argmax(y[1:]) + 1
return self.reverse_dictionary[prediction]
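The unpacking of self.parameters above is a common trick: keep all model weights in one flat vector for the optimizer and slice it apart with numpy.split at cumulative offsets. A toy version with made-up block sizes:

import numpy as np

theta = np.arange(10.0)           # one flat parameter vector
W_size, U_size, H_size = 4, 3, 2  # hypothetical block sizes
cuts = [W_size, W_size + U_size, W_size + U_size + H_size]
W, U, H, C = np.split(theta, cuts)
print(W, U, H, C)  # [0. 1. 2. 3.] [4. 5. 6.] [7. 8.] [9.]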
Example 8: SceneTrain
def SceneTrain(NumSub, ImagPerClassToRead, DataPath, TrainImgs, ResizeAmt):
# Read Images and Generate Image Features XNow
for i in range(0,NumSub):
for j in range(0, ImagPerClassToRead):
InitAll.tic()
#print DataPath+str(i+1)+'frame'+str(TrainImgs[i][j])+'.png'
XNow = cv2.imread(DataPath+str(i+1)+'frame'+str(TrainImgs[i][j])+'.png',0)
XNow = cv2.resize(XNow, ResizeAmt, interpolation = cv2.INTER_CUBIC)
XNow = InitAll.ComputeGIST(XNow)
#print("Sub " + str(i+1) + " Image " + str(j+1))
if(i==0 and j==0):
X = np.reshape(XNow, (1,np.product(XNow.shape)))
else:
X = np.vstack((X,np.reshape(XNow, (1,np.product(XNow.shape)))))
InitAll.toc()
print "Subject " + str(i+1) + " done...."
# Now Generate Class Labels Y
# Class labels start from 1 and not 0
Y = [i for i in range(1,NumSub+1)]*ImagPerClassToRead
Y = list(np.sort(Y))
SVMModel = svm.SVC()
SVMModel.fit(X, Y)
# Saving the objects:
    with open('SceneTrainedSVMModel'+strftime("%Y-%m-%d %H:%M:%S", gmtime())+'.pickle', 'wb') as f:
        pickle.dump([X, Y, SVMModel], f)
return SVMModel
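One design note on the feature loop above: growing X with np.vstack copies the whole matrix on every iteration, so the loop is quadratic in the number of images. The usual pattern, sketched below with a stand-in descriptor, is to collect rows in a list and stack once:

import numpy as np

rows = []
for i in range(3):              # stand-in for the image loops above
    feat = np.random.rand(512)  # stand-in for a GIST descriptor
    rows.append(feat.reshape(1, -1))
X = np.vstack(rows)             # one allocation instead of one per image
print(X.shape)                  # (3, 512)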
Example 9: _MakeUnitCellPhasesForT_Restricted
def _MakeUnitCellPhasesForT_Restricted(self, ijkRow, iOpTs):
# <r + T|op| s> = <r + T + S|op|s + S> for any unit-cell translation S,T
# we know the op is only non-zero for a limited range of T, which we
# have given via iOpTs.
#
# we have given the ijkRow (lhs T), and are now looking for the ijkCol
# (right T) for which <r+Tr| op |r+Tc> is non-zero. That means that
# Tr-Tc must lie within the Tc supplied in iOpTs.
# FIXME: do this properly.
if 0:
I = []
PF = []
for ijkOp in (self.iTs[o] for o in iOpTs):
for iTCol,ijkCol in enumerate(self.iTs):
if np.all((ijkCol - ijkRow) % self.Size == ijkOp):
I.append(iTCol)
PF.append(np.product(self.PhaseShift**((ijkCol - ijkRow) / self.Size)))
return np.array(PF), np.array(I)
else:
        # well... that's not the proper way, but at the moment it's not the main problem in this form.
I = []
PF = []
for ijkOp in (self.iTs[o] for o in iOpTs):
ijkCol = ijkOp + ijkRow
PF.append(np.product(self.PhaseShift**((ijkCol) / self.Size)))
I.append(self.Fixme_iTsLookup[tuple(ijkCol % self.Size)])
return np.array(PF), np.array(I)
Example 10: get_srcp
def get_srcp(self, img):
import sourcecounts as sc
fwsig = const.fwsig
cutoff = 5.0
spin = -0.80
freq = img.frequency
bm = (img.beam[0], img.beam[1])
cdelt = img.wcs_obj.acdelt[:2]
x = 2.0*pi*N.product(bm)/abs(N.product(cdelt))/(fwsig*fwsig)*img.omega
smin_L = img.clipped_rms*cutoff*((1.4e9/freq)**spin)
scflux = sc.s
scnum = sc.n
index = 0
for i,s in enumerate(scflux):
if s < smin_L:
index = i
break
n1 = scnum[index]; n2 = scnum[-1]
s1 = scflux[index]; s2 = scflux[-1]
alpha = 1.0-log(n1/n2)/log(s1/s2)
A = (alpha-1.0)*n1/(s1**(1.0-alpha))
source_p = x*A*((cutoff*img.clipped_rms)**(1.0-alpha)) \
/((1.0-alpha)*(1.0-alpha))
return source_p
Example 11: check_meanmap
def check_meanmap(self, img, mean):
"""Calculates the statistics of the mean map and decides, when
mean_map=None, whether to take the map (if variance
is significant) or a constant value
"""
from math import sqrt
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Checkmean ")
cdelt = img.wcs_obj.acdelt[:2]
bm = (img.beam[0], img.beam[1])
fw_pix = sqrt(N.product(bm)/abs(N.product(cdelt)))
if img.masked:
unmasked = N.where(~img.mask_arr)
stdsub = N.std(mean[unmasked])
maxmean = N.max(mean[unmasked])
else:
stdsub = N.std(mean)
maxmean = N.max(mean)
rms_expect = img.clipped_rms/img.rms_box[0]*fw_pix
mylog.debug('%s %10.6f %s' % ('Standard deviation of mean image = ', stdsub*1000.0, 'mJy'))
mylog.debug('%s %10.6f %s' % ('Expected standard deviation = ', rms_expect*1000.0, 'mJy'))
# For mean map, use a higher threshold than for the rms map, as radio images
# should rarely, if ever, have significant variations in the mean
if stdsub > 5.0*rms_expect:
img.mean_map_type = 'map'
mylogger.userinfo(mylog, 'Variation in mean image significant')
else:
if img.confused:
img.mean_map_type = 'zero'
else:
img.mean_map_type = 'const'
mylogger.userinfo(mylog, 'Variation in mean image not significant')
return img
Example 12: check_rmsmap
def check_rmsmap(self, img, rms):
"""Calculates the statistics of the rms map and decides, when
rms_map=None, whether to take the map (if variance
is significant) or a constant value
"""
from math import sqrt
mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Checkrms ")
cdelt = img.wcs_obj.acdelt[:2]
bm = (img.beam[0], img.beam[1])
fw_pix = sqrt(N.product(bm)/abs(N.product(cdelt)))
if img.masked:
unmasked = N.where(~img.mask_arr)
stdsub = N.std(rms[unmasked])
maxrms = N.max(rms[unmasked])
else:
stdsub = N.std(rms)
maxrms = N.max(rms)
rms_expect = img.clipped_rms/sqrt(2)/img.rms_box[0]*fw_pix
mylog.debug('%s %10.6f %s' % ('Standard deviation of rms image = ', stdsub*1000.0, 'mJy'))
mylog.debug('%s %10.6f %s' % ('Expected standard deviation = ', rms_expect*1000.0, 'mJy'))
if stdsub > 1.1*rms_expect:
img.use_rms_map = True
mylogger.userinfo(mylog, 'Variation in rms image significant')
else:
img.use_rms_map = False
mylogger.userinfo(mylog, 'Variation in rms image not significant')
return img
Example 13: df_fromdict
def df_fromdict(data, repeat=1):
"""
Produces a factorial DataFrame from a dict or list of tuples.
For example, suppose you want to generate a DataFrame like this::
a b
0 one 0
1 one 1
2 two 0
3 two 1
This function generates such output simply by providing the following:
df_fromdict([('a', ['one', 'two']), ('b', [0, 1])])
:Args:
data: dict or a list of tuples
Data used to produce a DataFrame. Keys specify column names, and
values specify possible (unique) values.
:Kwargs:
repeat: int (default: 1)
How many times everything should be repeated. Useful if you want to
simulate multiple samples of each condition, for example.
:Returns:
pandas.DataFrame with data.items() column names
"""
data = OrderedDict(data)
    count = list(map(len, data.values()))  # list() so the slices below work in Python 3
df = {}
for i, (key, vals) in enumerate(data.items()):
        # int(): np.product of an empty slice returns the float 1.0, which repeat/tile reject
        rep = np.repeat(vals, int(np.product(count[i+1:])))
        tile = np.tile(rep, int(np.product(count[:i])))
df[key] = np.repeat(tile, repeat)
df = pandas.DataFrame(df, columns=data.keys())
return df
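A quick check of the repeat keyword, reusing the docstring's own example (and assuming the snippet's imports: OrderedDict, numpy as np, pandas): every factorial combination is duplicated repeat times.

df = df_fromdict([('a', ['one', 'two']), ('b', [0, 1])], repeat=2)
print(df)
#      a  b
# 0  one  0
# 1  one  0
# 2  one  1
# 3  one  1
# 4  two  0
# 5  two  0
# 6  two  1
# 7  two  1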
Example 14: fftconvolve
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT. See convolve.
"""
s1 = array(in1.shape)
s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
size = s1 + s2 - 1
    # Always use 2**n-sized FFT; cast to int because FFT shapes must be integers
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
IN1 = fftn(in1, fsize)
IN1 *= fftn(in2, fsize)
fslice = tuple([slice(0, int(sz)) for sz in size])
ret = ifftn(IN1)[fslice].copy()
del IN1
if not complex_result:
ret = ret.real
if mode == "full":
return ret
elif mode == "same":
if product(s1, axis=0) > product(s2, axis=0):
osize = s1
else:
osize = s2
return _centered(ret, osize)
elif mode == "valid":
return _centered(ret, abs(s2 - s1) + 1)
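A quick sanity check of the definition above against numpy's direct convolution in 1-D (this assumes the snippet's own surrounding imports, e.g. from numpy import array and from numpy.fft import fftn, ifftn):

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([0.0, 1.0, 0.5])
print(np.allclose(fftconvolve(a, b, mode="full"), np.convolve(a, b)))  # True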
Example 15: plot_hist_rmsRho_Levs012
def plot_hist_rmsRho_Levs012(rho_L0, rho_L1, rho_L2, tSim, fname):
    '''Histogram the log10 densities at refinement levels 0, 1, and 2 and save the figure to fname.'''
import pylab as py
    # make three histograms, one per refinement level
rms = rho_L0
lab = r"$\rho$, Lev 0"
rms1d = np.reshape(rms, np.product(rms.shape))
    logrms = np.log10(rms1d)
cnt,bins,patches = py.hist(logrms, bins=100, color="blue", alpha=0.5, label=lab)
rms = rho_L1
lab = r"$\rho$, Lev 1"
rms1d = np.reshape(rms, np.product(rms.shape))
    logrms = np.log10(rms1d)
cnt,bins,patches = py.hist(logrms, bins=100, color="red", alpha=0.5, label=lab)
rms = rho_L2
lab = r"$\rho$, Lev 2"
rms1d = np.reshape(rms, np.product(rms.shape))
    logrms = np.log10(rms1d)
#plot quantities
Tratio = tSim / ic.tCr
#plot
cnt,bins,patches = py.hist(logrms, bins=100, color="green", alpha=0.5,label=lab)
py.vlines(np.log10( ic.rho0 ), 0, cnt.max(), colors="black", linestyles='dashed',label=r"$\rho_{0}$ = %2.2g [g/cm^3]" % ic.rho0)
#py.xlim([-13.,-9.])
py.xlabel("Log10 Density [g/cm^3]")
py.ylabel("count")
    py.title(r"$T/T_{\mathrm{Cross}}$ = %g" % Tratio)
py.legend(loc=0, fontsize="small")
py.savefig(fname,format="pdf")
py.close()
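The three near-identical histogram blocks invite a loop. A sketch of the same figure with the repetition folded away (ic is the initial-conditions module the original imports elsewhere; as in the original, the vlines height uses cnt from the last histogram drawn):

import numpy as np
import pylab as py
# ic: initial-conditions module assumed imported, as in the original

def plot_hist_rmsRho_Levs012(rho_L0, rho_L1, rho_L2, tSim, fname):
    '''Histogram the log10 densities at refinement levels 0, 1, and 2.'''
    levels = [(rho_L0, "blue", r"$\rho$, Lev 0"),
              (rho_L1, "red", r"$\rho$, Lev 1"),
              (rho_L2, "green", r"$\rho$, Lev 2")]
    for rho, color, lab in levels:
        logrms = np.log10(rho.reshape(-1))
        cnt, bins, patches = py.hist(logrms, bins=100, color=color, alpha=0.5, label=lab)
    py.vlines(np.log10(ic.rho0), 0, cnt.max(), colors="black", linestyles="dashed",
              label=r"$\rho_{0}$ = %2.2g [g/cm^3]" % ic.rho0)
    py.xlabel("Log10 Density [g/cm^3]")
    py.ylabel("count")
    py.title(r"$T/T_{\mathrm{Cross}}$ = %g" % (tSim / ic.tCr))
    py.legend(loc=0, fontsize="small")
    py.savefig(fname, format="pdf")
    py.close()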