This article collects typical usage examples of the Python function scipy.isinf. If you are wondering what scipy.isinf does and how it is used in practice, the curated examples below should help.
Fifteen code examples of the isinf function are shown, sorted by popularity by default.
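Before the examples, here is a minimal sketch of the function itself. scipy.isinf was simply a re-export of numpy.isinf (an element-wise test for positive or negative infinity); modern SciPy releases have removed these NumPy aliases, so new code should call numpy.isinf directly. Note also that several of the examples below come from Python 2 codebases and use Python 2 print syntax.

import numpy as np  # np.isinf is the function that scipy.isinf aliased

x = np.array([1.0, np.inf, -np.inf, np.nan])
print(np.isinf(x))        # [False  True  True False]
print(np.isinf(x).any())  # True -- at least one infinite entry
x[np.isinf(x)] = 0        # the replace-with-zero idiom used throughout the examples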
Example 1: LDA_batch_normalization
def LDA_batch_normalization(dataset, sample_table, batch_col, output_folder, ncomps): # this is actually the batch normalization method

    tmp_output_folder = os.path.join(output_folder, 'tmp')

    if not os.path.isdir(tmp_output_folder):
        os.makedirs(tmp_output_folder)

    barcodes, filtered_conditions, filtered_matrix, conditions, matrix = dataset

    # Remove any remaining NaNs and Infs from the filtered matrix - they would screw
    # up the LDA.
    filtered_matrix[scipy.isnan(filtered_matrix)] = 0
    filtered_matrix[scipy.isinf(filtered_matrix)] = 0

    # For the full matrix, also eliminate NaNs and Infs, BUT preserve the indices and values
    # so they can be added back into the matrix later (not implemented yet, and may never
    # be - there should no longer be NaNs and Infs in the dataset).
    # The NaNs and Infs will mess up the final step of the MATLAB LDA script, which uses
    # matrix multiplication to remove the specified number of components!
    matrix_nan_inds = scipy.isnan(matrix)
    matrix_nan_vals = matrix[matrix_nan_inds]
    matrix_inf_inds = scipy.isinf(matrix)
    matrix_inf_vals = matrix[matrix_inf_inds]

    matrix[matrix_nan_inds] = 0
    matrix[matrix_inf_inds] = 0

    # Save both the small matrix (for determining the components to remove) and the
    # full matrix for the matlab script
    filtered_matrix_tmp_filename = os.path.join(tmp_output_folder, 'nonreplicating_matrix.txt')
    full_matrix_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix.txt')

    np.savetxt(filtered_matrix_tmp_filename, filtered_matrix)
    np.savetxt(full_matrix_tmp_filename, matrix)

    # Map the batch to integers for matlab, and write out to a file so matlab can read.
    # Note that yes, the batch_classes should match up with the filtered matrix, not
    # the full matrix.
    batch_classes = get_batch_classes(dataset = [barcodes, filtered_conditions, filtered_matrix], sample_table = sample_table, batch_col = batch_col)
    class_tmp_filename = os.path.join(tmp_output_folder, 'classes.txt')
    writeList(batch_classes, class_tmp_filename)

    output_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix_lda_normalized.txt')
    runLDAMatlabFunc(filtered_matrix_filename = filtered_matrix_tmp_filename, \
                     matrix_filename = full_matrix_tmp_filename, \
                     class_filename = class_tmp_filename, \
                     ncomps = ncomps, \
                     output_filename = output_tmp_filename)

    # The X norm that is returned is the full matrix. In the future, we could add in
    # returning the components to remove so they can be visualized or applied to other
    # one-off datasets.
    Xnorm = scipy.genfromtxt(output_tmp_filename)

    ## Dump the dataset out!
    #output_filename = os.path.join(mtag_effect_folder, 'scaleddeviation_full_mtag_lda_{}.dump.gz'.format(ncomps))
    #of = gzip.open(output_filename, 'wb')
    #cPickle.dump([barcodes, conditions, Xnorm], of)
    #of.close()

    return [barcodes, conditions, Xnorm]
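The NaN/Inf scrubbing at the top of this function is a common pre-LDA cleanup step. As a sketch of an alternative (not the author's code), numpy.nan_to_num does the same replacement in one call; the nan/posinf/neginf keywords assume NumPy >= 1.17.

import numpy as np

matrix = np.array([[1.0, np.nan], [np.inf, -np.inf]])
# Replace NaN and +/-Inf with 0 in a single pass
matrix = np.nan_to_num(matrix, nan=0.0, posinf=0.0, neginf=0.0)
print(matrix)  # [[1. 0.] [0. 0.]]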
Example 2: lapnormadj
def lapnormadj(A):
    """
    Function to perform Laplacian Normalization on m x n matrix
    :param A: Adjacency Matrix
    :return: Laplace normalised matrix
    """
    import scipy
    import numpy as np
    from scipy.sparse import csgraph

    n, m = A.shape
    d1 = A.sum(axis=1).flatten()
    d2 = A.sum(axis=0).flatten()
    d1_sqrt = 1.0/scipy.sqrt(d1)
    d2_sqrt = 1.0/scipy.sqrt(d2)
    d1_sqrt[scipy.isinf(d1_sqrt)] = 10000
    d2_sqrt[scipy.isinf(d2_sqrt)] = 10000

    la = np.zeros(shape=(n, m))
    for i in range(0, n):
        for j in range(0, m):
            la[i, j] = A[i, j]/(d1_sqrt[i]*d2_sqrt[j])

    #D1 = scipy.sparse.spdiags(d1_sqrt, [0], n, m, format='coo')
    #D2 = scipy.sparse.spdiags(d2_sqrt, [0], n, m, format='coo')
    la[la < 1e-5] = 0
    return scipy.sparse.coo_matrix(la)
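The double loop divides each entry by d1_sqrt[i]*d2_sqrt[j], which is exactly the outer product of the two scaling vectors, so the whole normalization can be vectorized. A sketch under that reading, keeping the example's convention of replacing infinite factors (from zero-degree rows or columns) with a large constant:

import numpy as np
import scipy.sparse

def lapnormadj_vectorized(A):
    A = np.asarray(A, dtype=float)
    d1_sqrt = 1.0 / np.sqrt(A.sum(axis=1))   # row factors
    d2_sqrt = 1.0 / np.sqrt(A.sum(axis=0))   # column factors
    d1_sqrt[np.isinf(d1_sqrt)] = 10000       # guard zero-degree rows
    d2_sqrt[np.isinf(d2_sqrt)] = 10000       # guard zero-degree columns
    la = A / np.outer(d1_sqrt, d2_sqrt)      # same arithmetic as the nested loops
    la[la < 1e-5] = 0
    return scipy.sparse.coo_matrix(la)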
Example 3: LDA_batch_normalization
def LDA_batch_normalization(dataset, sample_table, batch_col, output_folder, n_comps): # this is actually the batch normalization method

    tmp_output_folder = os.path.join(output_folder, 'tmp')

    if not os.path.isdir(tmp_output_folder):
        os.makedirs(tmp_output_folder)

    barcodes, filtered_conditions, filtered_matrix, conditions, matrix = dataset

    # Remove any remaining NaNs and Infs from the filtered matrix - they would screw
    # up the LDA.
    filtered_matrix[scipy.isnan(filtered_matrix)] = 0
    filtered_matrix[scipy.isinf(filtered_matrix)] = 0

    # For the full matrix, also eliminate NaNs and Infs, BUT preserve the indices and values
    # so they can be added back into the matrix later (not implemented yet, and may never
    # be - there should no longer be NaNs and Infs in the dataset).
    # The NaNs and Infs will mess up the final step of the MATLAB LDA script, which uses
    # matrix multiplication to remove the specified number of components!
    matrix_nan_inds = scipy.isnan(matrix)
    matrix_nan_vals = matrix[matrix_nan_inds]
    matrix_inf_inds = scipy.isinf(matrix)
    matrix_inf_vals = matrix[matrix_inf_inds]

    matrix[matrix_nan_inds] = 0
    matrix[matrix_inf_inds] = 0

    # Save both the small matrix (for determining the components to remove) and the
    # full matrix for the matlab script
    filtered_matrix_tmp_filename = os.path.join(tmp_output_folder, 'nonreplicating_matrix.txt')
    full_matrix_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix.txt')

    np.savetxt(filtered_matrix_tmp_filename, filtered_matrix)
    np.savetxt(full_matrix_tmp_filename, matrix)

    # Map batch classes to integers
    batch_classes = get_batch_classes(dataset = [barcodes, filtered_conditions, filtered_matrix], sample_table = sample_table, batch_col = batch_col)

    # Check the number of classes and limit n_comps
    a = [x > 0 for x in np.sum(np.absolute(filtered_matrix), axis=0)]
    classes = np.asarray([batch_classes[i] for i in range(len(batch_classes)) if a[i]])
    n_samples = filtered_matrix.shape[0]
    n_classes = len(np.unique(classes))
    if n_samples == n_classes:
        print "ERROR: The number of samples is equal to the number of classes. Exiting"
    if n_classes <= n_comps:
        print "Fewer classes, " + str(n_classes) + ", than components. Setting components to " + str(n_classes-1)
        n_comps = n_classes-1

    # Run LDA
    #Xnorm = scikit_lda(filtered_matrix, matrix, batch_classes, n_comps)
    Xnorm = outer_python_lda(filtered_matrix, matrix, batch_classes, n_comps)

    return [barcodes, conditions, Xnorm, n_comps]
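The clamp on n_comps reflects a hard limit of linear discriminant analysis: with C classes there are at most C-1 discriminant components. A sketch of the same guard around scikit-learn's LDA (the function and variable names here are illustrative, not from the original code):

import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

def fit_lda_clamped(X, classes, n_comps):
    n_classes = len(np.unique(classes))
    n_comps = min(n_comps, n_classes - 1)  # LDA yields at most C-1 components
    lda = LinearDiscriminantAnalysis(n_components=n_comps)
    lda.fit(X, classes)
    return lda, n_comps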
Example 4: setRunParams
def setRunParams(self, ic=[], params=[], t0=[], tend=[], gt0=[], refine=0,
                 specTimes=[], bounds=[]):
    if not self.initBasic:
        raise InitError, 'You must initialize the integrator before setting params. (initBasic)'

    #if self.initParams == True:
    #    raise InitError, 'You must clear params before setting them. Use clearParams()'

    if self.checkRunParams(ic, params, t0, tend, gt0, refine, specTimes,
                           bounds):
        self.ic = ic
        self.params = params
        self.t0 = float(t0)
        self.tend = float(tend)
        self.gt0 = float(gt0)
        self.refine = int(refine)
        self.specTimes = specTimes

        if self.t0 < self.tend:
            self.direction = 1
        else:
            self.direction = -1

        # Set bounds
        if bounds != []:
            self.upperBounds = bounds[1]
            self.lowerBounds = bounds[0]
            for i in range(self.phaseDim + self.paramDim):
                if isinf(self.upperBounds[i]) and self.upperBounds[i] > 0:
                    self.upperBounds[i] = abs(float(self.defaultBound))
                elif isinf(self.upperBounds[i]) and self.upperBounds[i] < 0:
                    self.upperBounds[i] = -abs(float(self.defaultBound))

                if isinf(self.lowerBounds[i]) and self.lowerBounds[i] > 0:
                    self.lowerBounds[i] = abs(float(self.defaultBound))
                elif isinf(self.lowerBounds[i]) and self.lowerBounds[i] < 0:
                    self.lowerBounds[i] = -abs(float(self.defaultBound))
        else:
            self.upperBounds = [abs(float(self.defaultBound)) for x in range(self.phaseDim + self.paramDim)]
            self.lowerBounds = [-abs(float(self.defaultBound)) for x in range(self.phaseDim + self.paramDim)]

        retval = self._integMod.SetRunParameters(self.ic, self.params,
                                                 self.gt0, self.t0, self.tend,
                                                 self.refine, len(self.specTimes),
                                                 self.specTimes, self.upperBounds,
                                                 self.lowerBounds)

        if retval[0] != 1:
            raise InitError, 'SetRunParameters call failed!'

        self.canContinue = False
        self.setParams = True
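The bound-clamping loop replaces any infinite bound with a large finite default (keeping its sign) so the compiled integrator only ever sees finite numbers. A vectorized sketch of the same idea; default_bound stands in for self.defaultBound:

import numpy as np

def clamp_bounds(bounds, default_bound):
    bounds = np.asarray(bounds, dtype=float)
    inf_mask = np.isinf(bounds)
    # copysign caps the magnitude while preserving the sign of the infinity
    bounds[inf_mask] = np.copysign(abs(float(default_bound)), bounds[inf_mask])
    return bounds

print(clamp_bounds([-np.inf, 0.5, np.inf], 1e8))  # [-1.e+08  5.e-01  1.e+08]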
Example 5: sigmoid
def sigmoid(X):
    ## e = sp.exp(-X)
    ## e = 0.0000001 if e ==
    v = 1. / (1. + sp.exp(-X))
    if sp.isnan(v).sum() or sp.isinf(v).sum():
        i = 0
    return v
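The isnan/isinf check here looks like a leftover debugging hook (i = 0 is a convenient line for a breakpoint). The underlying issue is that sp.exp(-X) overflows for large negative X; the division still returns the mathematically correct 0.0, but with a runtime warning. A sketch of an overflow-free sigmoid; alternatively, scipy.special.expit computes exactly this function in a vectorized, overflow-safe way:

import numpy as np

def sigmoid_stable(X):
    # exp() stays within float64 range; the sigmoid saturates long before |x| = 500
    X = np.clip(X, -500, 500)
    return 1.0 / (1.0 + np.exp(-X))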
Example 6: matfile_featfunc
def matfile_featfunc(fname,
                     suffix,
                     kernel_type = DEFAULT_KERNEL_TYPE,
                     variable_name = DEFAULT_VARIABLE_NAME):
    fname += suffix

    error = False
    try:
        if kernel_type == "exp_mu_da":
            # hack for GB with 204 dims
            fdata = io.loadmat(fname)[variable_name].reshape(-1, 204)
        else:
            fdata = io.loadmat(fname)[variable_name].ravel()
    except TypeError:
        fname_error = fname+'.error'
        print "[ERROR] couldn't open", fname, "moving it to", fname_error
        shutil.move(fname, fname_error)
        error = True
    except:
        print "[ERROR] (unknown) with", fname
        raise

    if error:
        raise RuntimeError("An error occured while loading '%s'"
                           % fname)

    assert(not sp.isnan(fdata).any())
    assert(not sp.isinf(fdata).any())

    return fdata
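The two asserts act as a sanity gate on loaded feature files. A sketch of the same check as a reusable validator (a hypothetical helper, not part of the original module) that also reports how many entries are bad; numpy.isfinite is False for both NaN and Inf, so one mask covers both asserts:

import numpy as np

def assert_finite(fdata, fname):
    bad = ~np.isfinite(fdata)
    if bad.any():
        raise ValueError("%d non-finite values in '%s'" % (int(bad.sum()), fname))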
Example 7: _process_image
def _process_image(self, fname):
    kernel_type = self.kernel_type
    variable_name = self.variable_name

    fname += self.input_suffix

    error = False
    try:
        if kernel_type == "exp_mu_da":
            # hack for GB with 204 dims
            # fdata = io.loadmat(fname)[variable_name].reshape(-1, 204)
            fdata = self._load_image(fname).reshape(-1, 204)
        else:
            fdata = self._load_image(fname).ravel()
            # fdata = io.loadmat(fname)[variable_name].ravel()
    except TypeError:
        fname_error = fname + ".error"
        print "[ERROR] couldn't open", fname, "moving it to", fname_error
        # os.unlink(fname)
        shutil.move(fname, fname_error)
        error = True
    except:
        print "[ERROR] (unknown) with", fname
        raise

    if error:
        raise RuntimeError("An error occured while loading '%s'" % fname)

    assert not sp.isnan(fdata).any()
    assert not sp.isinf(fdata).any()

    return fdata
Example 8: evaluer
def evaluer(self):
    """ Return a numerical value for the expression
    """
    # Build a dictionary of variables: {'name': value}
    # (required by "eval")
    dict = {}
    for n, v in self.vari.items():
        print " ", n, v
        dict[n] = v.v[0]
    global safe_dict
    dict.update(safe_dict)

    # Perform the evaluation
    try:
        v = eval(self.py_expr, {"__builtins__": None}, dict)
    except:
        return False

    # print type(v)
    # Analyse the result
    if not type(v) == float and not type(v) == scipy.float64 and not type(v) == int:
        return False
    elif scipy.isinf(v) or scipy.isnan(v):
        return None
    else:
        return v
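Since the evaluated result is a scalar here, the standard library works as well as the scipy versions. A sketch of the same result analysis using math.isinf/math.isnan and a broader numeric type test (an illustrative rewrite, not the original class's code):

import math
import numbers

def analyse_result(v):
    if not isinstance(v, numbers.Real) or isinstance(v, bool):
        return False        # not a usable numeric result
    if math.isinf(v) or math.isnan(v):
        return None         # numeric but not finite
    return v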
Example 9: mmse_stsa
def mmse_stsa(infile, outfile, noise_sum):
    signal, params = read_signal(infile, WINSIZE)
    nf = len(signal)/(WINSIZE/2) - 1
    sig_out = sp.zeros(len(signal), sp.float32)

    G = sp.ones(WINSIZE)
    prevGamma = G
    alpha = 0.98
    window = sp.hanning(WINSIZE)
    gamma15 = spc.gamma(1.5)
    lambdaD = noise_sum / 5.0
    percentage = 0
    for no in xrange(nf):
        p = int(math.floor(1. * no / nf * 100))
        if (p > percentage):
            percentage = p
            print "{}%".format(p),

        y = get_frame(signal, WINSIZE, no)
        Y = sp.fft(y*window)
        Yr = sp.absolute(Y)
        Yp = sp.angle(Y)
        gamma = Yr**2/lambdaD
        xi = alpha * G**2 * prevGamma + (1-alpha)*sp.maximum(gamma-1, 0)
        prevGamma = gamma
        nu = gamma * xi / (1+xi)
        G = (gamma15 * sp.sqrt(nu) / gamma) * sp.exp(-nu/2) * ((1+nu)*spc.i0(nu/2)+nu*spc.i1(nu/2))
        idx = sp.isnan(G) + sp.isinf(G)
        G[idx] = xi[idx] / (xi[idx] + 1)
        Yr = G * Yr
        Y = Yr * sp.exp(Yp*1j)
        y_o = sp.real(sp.ifft(Y))
        add_signal(sig_out, y_o, WINSIZE, no)

    write_signal(outfile, params, sig_out)
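The line idx = sp.isnan(G) + sp.isinf(G) relies on + acting as an element-wise OR on boolean arrays; bins where the MMSE-STSA gain formula blows up (for instance when gamma contains a zero in the division) then fall back to the Wiener gain xi/(1+xi). A sketch of the same repair step written with the explicit | operator:

import numpy as np

def repair_gain(G, xi):
    """Replace NaN/Inf spectral gains with the Wiener gain xi/(1+xi)."""
    G = np.array(G, dtype=float, copy=True)
    bad = np.isnan(G) | np.isinf(G)       # '|' is the explicit boolean OR
    G[bad] = xi[bad] / (xi[bad] + 1.0)
    return G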
Example 10: run
def run(self, phase=None):
    r'''
    '''
    logger.warning('This algorithm can take some time...')
    graph = self._net.create_adjacency_matrix(data=self._net['throat.length'], sprsfmt='csr')

    if phase is not None:
        self._phase = phase
        if 'throat.occupancy' in self._phase.props():
            temp = self._net['throat.length']*(self._phase['throat.occupancy'] == 1)
            graph = self._net.create_adjacency_matrix(data=temp, sprsfmt='csr', prop='temp')

    #self._net.tic()
    path = spgr.shortest_path(csgraph=graph, method='D', directed=False)
    #self._net.toc()

    Px = sp.array(self._net['pore.coords'][:, 0], ndmin=2)
    Py = sp.array(self._net['pore.coords'][:, 1], ndmin=2)
    Pz = sp.array(self._net['pore.coords'][:, 2], ndmin=2)

    Cx = sp.square(Px.T - Px)
    Cy = sp.square(Py.T - Py)
    Cz = sp.square(Pz.T - Pz)
    Ds = sp.sqrt(Cx + Cy + Cz)

    temp = path/Ds
    #temp = path
    temp[sp.isnan(temp)] = 0
    temp[sp.isinf(temp)] = 0

    return temp
Example 11: run
def run(self, phase=None, throats=None):
    logger.warning('This algorithm can take some time...')
    conduit_lengths = sp.sum(misc.conduit_lengths(network=self._net,
                                                  mode='centroid'), axis=1)
    graph = self._net.create_adjacency_matrix(data=conduit_lengths,
                                              sprsfmt='csr')

    if phase is not None:
        self._phase = phase
        if 'throat.occupancy' in self._phase.props():
            temp = conduit_lengths*(self._phase['throat.occupancy'] == 1)
            graph = self._net.create_adjacency_matrix(data=temp,
                                                      sprsfmt='csr',
                                                      prop='temp')

    path = spgr.shortest_path(csgraph=graph, method='D', directed=False)

    Px = sp.array(self._net['pore.coords'][:, 0], ndmin=2)
    Py = sp.array(self._net['pore.coords'][:, 1], ndmin=2)
    Pz = sp.array(self._net['pore.coords'][:, 2], ndmin=2)

    Cx = sp.square(Px.T - Px)
    Cy = sp.square(Py.T - Py)
    Cz = sp.square(Pz.T - Pz)
    Ds = sp.sqrt(Cx + Cy + Cz)

    temp = path / Ds
    temp[sp.isnan(temp)] = 0
    temp[sp.isinf(temp)] = 0

    return temp
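In both of the preceding examples, path/Ds produces NaN on the diagonal (0/0, since each pore is at zero distance from itself) and Inf wherever shortest_path reports an unreachable pore pair, so both sentinel values are zeroed before the matrix is returned. A standalone sketch of that post-processing on a toy graph with one disconnected node:

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import shortest_path

graph = csr_matrix(np.array([[0., 1., 0.],
                             [1., 0., 0.],
                             [0., 0., 0.]]))       # node 2 is unreachable
path = shortest_path(csgraph=graph, method='D', directed=False)
coords = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]])
Ds = np.sqrt(((coords[:, None, :] - coords[None, :, :])**2).sum(-1))

temp = path / Ds                  # NaN on the diagonal, Inf for node 2
temp[np.isnan(temp)] = 0
temp[np.isinf(temp)] = 0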
Example 12: sample
def sample(self, model, evidence):
    z = evidence['z']
    T = evidence['T']
    g = evidence['g']
    h = evidence['h']
    transition_var_g = evidence['transition_var_g']
    shot_id = evidence['shot_id']

    observation_var_g = model.known_params['observation_var_g']
    observation_var_h = model.known_params['observation_var_h']
    prior_mu_g = model.hyper_params['g']['mu']
    prior_cov_g = model.hyper_params['g']['cov']
    N = len(z)
    n = len(g)

    ## Make g, h, and z vector valued to avoid ambiguity
    #g = g.copy().reshape((n, 1))
    #h = h.copy().reshape((n, 1))
    #
    pdb.set_trace()
    z_g = ma.asarray(nan + zeros(n))
    obs_cov = ma.asarray(inf + zeros(n))
    if 1 in T:
        z_g[T==1] = z[T==1]
        obs_cov[T==1] = observation_var_g
    if 2 in T:
        z_g[T==2] = z[T==2] - h[T==2]
        obs_cov[T==2] = observation_var_h
    #for i in xrange(n):
    #    z_i = z[shot_id == i]
    #    T_i = T[shot_id == i]
    #    if 1 in T_i and 2 in T_i:
    #        # Sample mean and variance for multiple observations
    #        n_obs_g, n_obs_h = sum(T_i == 1), sum(T_i == 2)
    #        obs_cov_g, obs_cov_h = observation_var_g/n_obs_g, observation_var_h/n_obs_h
    #        z_g[i] = (mean(z_i[T_i == 1])/obs_cov_g + mean(z_i[T_i == 2] - h[i])/obs_cov_h)/(1/obs_cov_g + 1/obs_cov_h)
    #        obs_cov[i] = 1/(1/obs_cov_g + 1/obs_cov_h)
    #    elif 1 in T_i:
    #        n_obs_g = sum(T_i == 1)
    #        z_g[i] = mean(z_i[T_i == 1])
    #        obs_cov[i] = observation_var_g/n_obs_g
    #    elif 2 in T_i:
    #        n_obs_h = sum(T_i == 2)
    #        z_g[i] = mean(z_i[T_i == 2] - h[i])
    #        obs_cov[i] = observation_var_h/n_obs_h
    z_g[isnan(z_g)] = ma.masked
    obs_cov[isinf(obs_cov)] = ma.masked

    kalman = self._kalman
    kalman.initial_state_mean = array([prior_mu_g[0],])
    kalman.initial_state_covariance = array([prior_cov_g[0],])
    kalman.transition_matrices = eye(1)
    kalman.transition_covariance = array([transition_var_g,])
    kalman.observation_matrices = eye(1)
    kalman.observation_covariance = obs_cov
    sampled_g = forward_filter_backward_sample(kalman, z_g, prior_mu_g, prior_cov_g)

    return sampled_g.reshape((n,))
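This sampler uses NaN and Inf as "not observed" sentinels: entries that no branch assigns keep their initialized values, and the isnan/isinf masking then turns them into missing observations that the Kalman routines can skip. A compact sketch of the sentinel-to-mask conversion:

import numpy as np
import numpy.ma as ma

n = 5
z_g = ma.asarray(np.nan + np.zeros(n))       # NaN marks 'no observation'
obs_cov = ma.asarray(np.inf + np.zeros(n))   # Inf marks 'no variance assigned'
z_g[[0, 2]] = 1.5                            # pretend indices 0 and 2 were observed
obs_cov[[0, 2]] = 0.1

z_g[np.isnan(z_g)] = ma.masked               # unobserved entries become missing
obs_cov[np.isinf(obs_cov)] = ma.masked
print(z_g)                                   # [1.5 -- 1.5 -- --]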
Example 13: _oneEvaluation
def _oneEvaluation(self, evaluable):
    """ This method should be called by all optimizers for producing an evaluation. """
    if self._wasUnwrapped:
        self.wrappingEvaluable._setParameters(evaluable)
        res = self.__evaluator(self.wrappingEvaluable)
    elif self._wasWrapped:
        res = self.__evaluator(evaluable.params)
    else:
        res = self.__evaluator(evaluable)
    ''' added by JPQ '''
    if self.constrained:
        self.feasible = self.__evaluator.outfeasible
        self.violation = self.__evaluator.outviolation
    # ---
    if isscalar(res):
        # detect numerical instability
        if isnan(res) or isinf(res):
            raise DivergenceError
        # always keep track of the best
        if (self.numEvaluations == 0
                or self.bestEvaluation is None
                or (self.minimize and res <= self.bestEvaluation)
                or (not self.minimize and res >= self.bestEvaluation)):
            self.bestEvaluation = res
            self.bestEvaluable = evaluable.copy()
    self.numEvaluations += 1

    # if desired, also keep track of all evaluables and/or their fitness.
    if self.storeAllEvaluated:
        if self._wasUnwrapped:
            self._allEvaluated.append(self.wrappingEvaluable.copy())
        elif self._wasWrapped:
            self._allEvaluated.append(evaluable.params.copy())
        else:
            self._allEvaluated.append(evaluable.copy())

    if self.storeAllEvaluations:
        if self._wasOpposed and isscalar(res):
            ''' added by JPQ '''
            if self.constrained:
                self._allEvaluations.append([-res, self.feasible, self.violation])
            # ---
            else:
                self._allEvaluations.append(-res)
        else:
            ''' added by JPQ '''
            if self.constrained:
                self._allEvaluations.append([res, self.feasible, self.violation])
            # ---
            else:
                self._allEvaluations.append(res)

    ''' added by JPQ '''
    if self.constrained:
        return [res, self.feasible, self.violation]
    else:
        # ---
        return res
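The isnan/isinf test is the standard divergence guard in optimization loops: a NaN or infinite fitness means the candidate broke the objective numerically, and raising immediately is safer than letting that value be ranked. A generic sketch of the guard (DivergenceError here is a local stand-in for the framework's exception):

import numpy as np

class DivergenceError(Exception):
    pass

def checked_evaluation(objective, candidate):
    res = objective(candidate)
    if np.isscalar(res) and (np.isnan(res) or np.isinf(res)):
        raise DivergenceError("objective returned %r" % (res,))
    return res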
Example 14: makehist
def makehist(testpath, npulses):
    """
    This function will create histograms from data made in the testpath.

    Inputs
        testpath - The path where the data is located.
        npulses - The number of pulses in the sim.
    """
    sns.set_style("whitegrid")
    sns.set_context("notebook")
    params = ['Ne', 'Te', 'Ti', 'Vi']
    histlims = [[1e10, 3e11], [1000., 3000.], [100., 2500.], [-400., 400.]]
    erlims = [[-2e11, 2e11], [-1000., 1000.], [-800., 800.], [-400., 400.]]
    erperlims = [[-100., 100.]]*4
    lims_list = [histlims, erlims, erperlims]
    errdict = makehistdata(params, testpath)[:4]
    ernames = ['Data', 'Error', 'Error Percent']

    # Two-dimensional histograms
    pcombos = [i for i in itertools.combinations(params, 2)]
    c_rows = int(math.ceil(float(len(pcombos))/2.))
    (figmplf, axmat) = plt.subplots(c_rows, 2, figsize=(12, c_rows*6), facecolor='w')
    axvec = axmat.flatten()
    for icomn, icom in enumerate(pcombos):
        curax = axvec[icomn]
        str1, str2 = icom
        _, _, _ = make2dhist(testpath, PARAMDICT[str1], PARAMDICT[str2], figmplf, curax)
    filetemplate = str(Path(testpath).joinpath('AnalysisPlots', 'TwoDDist'))
    plt.tight_layout()
    plt.subplots_adjust(top=0.95)
    figmplf.suptitle('Pulses: {0}'.format(npulses), fontsize=20)
    fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses)
    plt.savefig(fname)
    plt.close(figmplf)

    # One-dimensional histograms
    for ierr, iername in enumerate(ernames):
        filetemplate = str(Path(testpath).joinpath('AnalysisPlots', iername))
        (figmplf, axmat) = plt.subplots(2, 2, figsize=(20, 15), facecolor='w')
        axvec = axmat.flatten()
        for ipn, iparam in enumerate(params):
            plt.sca(axvec[ipn])
            if sp.any(sp.isinf(errdict[ierr][iparam])):
                continue
            binlims = lims_list[ierr][ipn]
            bins = sp.linspace(binlims[0], binlims[1], 100)
            xdata = errdict[ierr][iparam]
            xlog = sp.logical_and(xdata >= binlims[0], xdata < binlims[1])
            histhand = sns.distplot(xdata[xlog], bins=bins, kde=True, rug=False)
            axvec[ipn].set_title(iparam)
        figmplf.suptitle(iername + ' Pulses: {0}'.format(npulses), fontsize=20)
        fname = filetemplate+'_{0:0>5}Pulses.png'.format(npulses)
        plt.savefig(fname)
        plt.close(figmplf)
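The sp.any(sp.isinf(...)) test skips a histogram panel entirely if any value is infinite. A gentler sketch drops only the non-finite values before binning, so one bad entry does not blank a whole panel:

import numpy as np

def finite_hist_data(xdata, lo, hi):
    xdata = np.asarray(xdata, dtype=float)
    keep = np.isfinite(xdata) & (xdata >= lo) & (xdata < hi)
    return xdata[keep]    # safe to hand to sns.distplot or plt.hist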
Example 15: score_image
def score_image(self, image):
    """
    This finds whether the image is cloudy or not.
    :param image:
    :return:
    """
    pickle_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'cache/ck_cloud.p')

    # Load the cloud thresholds
    [cloudy_model, partly_cloudy_model, clear_model] = pickle.load(open(pickle_file, "rb"))

    mean, std, window_size = self.process_image(image)
    p = self.fit_model(window_size, std)
    #p = self.fit_model(window_size, mean)

    # Rebuild the functions over the window range, find the residual vectors and
    # then their Euclidean norms; the model with the smallest norm is the best match.
    fitfunc = lambda p, x: p[0] * x ** p[1]
    clear_residual = scipy.absolute(fitfunc(p, window_size) - fitfunc(clear_model, window_size))
    pc_residual = scipy.absolute(fitfunc(p, window_size) - fitfunc(partly_cloudy_model, window_size))
    cloudy_residual = scipy.absolute(fitfunc(p, window_size) - fitfunc(cloudy_model, window_size))

    clear_residual[scipy.isinf(clear_residual)] = 0.0
    clear_residual[scipy.isnan(clear_residual)] = 0.0
    pc_residual[scipy.isinf(pc_residual)] = 0.0
    pc_residual[scipy.isnan(pc_residual)] = 0.0
    cloudy_residual[scipy.isinf(cloudy_residual)] = 0.0
    cloudy_residual[scipy.isnan(cloudy_residual)] = 0.0

    clear_norm = scipy.linalg.norm(clear_residual)
    pc_norm = scipy.linalg.norm(pc_residual)
    cloudy_norm = scipy.linalg.norm(cloudy_residual)

    smallest_val = [clear_norm, pc_norm, cloudy_norm].index(min([clear_norm, pc_norm, cloudy_norm]))
    lg.debug('score :: ' + str(smallest_val))
    return smallest_val
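The six cleanup assignments can be collapsed with numpy.isfinite, which flags NaN and +/-Inf in a single mask; zeroing those entries means non-finite residuals simply do not contribute to the Euclidean norm. A sketch (fitfunc is passed in to keep the helper self-contained):

import numpy as np

def residual_norm(p_fit, p_model, window_size, fitfunc):
    r = np.abs(fitfunc(p_fit, window_size) - fitfunc(p_model, window_size))
    r[~np.isfinite(r)] = 0.0    # one mask covers both the isinf and isnan cases
    return np.linalg.norm(r)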