This page collects typical usage examples of the model.model function in Python. If you have been wondering what the Python model function does, how to use it, or what calling it looks like in practice, the hand-picked code examples below should help.
Below are 15 code examples of the model function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: lnlikeHF
def lnlikeHF(pars, samples, obs, u, extra=False):
    '''
    Generic likelihood function for importance sampling with any number of
    dimensions.
    Now with an added (hierarchical) jitter parameter.
    obs should be a 2d array of observations. shape = (ndims, nobs)
    u should be a 2d array of uncertainties. shape = (ndims, nobs)
    samples is a 3d array of samples. shape = (ndims, nobs, nsamp)
    If extra == True, sigma has both a slope and an intercept.
    '''
    ndims, nobs, nsamp = samples.shape
    ll = np.zeros((nobs, nsamp*nobs))
    for i in range(nobs):
        if extra:
            inv_sigma2 = 1.0/(u[1, :][i]**2 +
                              pars[2]**2 + pars[3] * obs[0, :][i])
        else:
            inv_sigma2 = 1.0/(u[1, :][i]**2 +
                              (pars[2]*model1(pars, obs[0, :][i]))**2)
        ll[i, :] = -.5*((obs[1, :][i] - model(pars, samples))**2*inv_sigma2) \
            + np.log(inv_sigma2)
    loglike = np.sum(np.logaddexp.reduce(ll, axis=1))
    if np.isfinite(loglike):
        return loglike, loglike
    return -np.inf, None
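A minimal sketch of the inputs this function expects, with illustrative shapes and made-up values; the call itself is commented out because it relies on the module's own model and model1 functions:

import numpy as np

ndims, nobs, nsamp = 2, 10, 50
obs = np.random.randn(ndims, nobs)             # row 0: x values, row 1: y values
u = np.abs(np.random.randn(ndims, nobs))       # matching uncertainties
samples = np.random.randn(ndims, nobs, nsamp)  # posterior samples per observation
pars = [0.5, 1.0, 0.1]                         # model parameters plus a jitter term
# loglike, _ = lnlikeHF(pars, samples, obs, u)  # requires model/model1 from the module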
Example 2: init
def init(self):
    self.y_d = self.parms.y_d.copy()
    # Load models
    self.models = list()
    for nm in range(int(self.parms.num_models)):
        self.models.append(mdl.model(self.parms))
    # Load multimodel setup
    self.multimodels = mmdl.multimodel(float(self.parms.NinSoftmax), float(self.parms.Nout),
                                       self.parms.num_models, self.parms.eta_s, self.parms.eta_g)
    self.sdpos = np.zeros((self.parms.Nout,))
    # Read and set all the initial parameters
    t1 = self.parms.t1
    dt = self.parms.dt
    delta = self.parms.delta
    nsteps = int(t1 / dt) + delta + 1  # length of the simulation time grid (integer for array shapes)
    self.sysInp = np.zeros((self.parms.sNin, nsteps))
    self.sysInp2 = np.zeros((self.parms.Nout, nsteps))
    self.sysOutp = np.zeros((self.parms.Nout, nsteps))
    self.eeVal = self.sysInp  # note: aliases sysInp rather than copying it
    self.inited = 1
    self.model_e = np.zeros((self.parms.Nout, self.parms.num_models))
    self.model_outputs = np.zeros((self.parms.Nout, self.parms.num_models, nsteps))
    self.h = np.zeros((self.parms.num_models, nsteps))
    self.model_doutputs = np.zeros((self.parms.Nout, self.parms.num_models))
    self.i = delta
    self.t = 0
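The parms object is never shown in this snippet; for experimentation, a stand-in with the attributes the code reads might look like the following (every attribute name below is inferred from the code above, and the values are arbitrary):

from types import SimpleNamespace
import numpy as np

parms = SimpleNamespace(
    y_d=np.zeros((2,)), num_models=3, NinSoftmax=4, Nout=2,
    eta_s=0.1, eta_g=0.01, sNin=4, t1=100, dt=0.1, delta=5)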
Example 3: keyring_present_type
def keyring_present_type(**kwargs):
    """
    Check if a keyring of the given type exists on disk.
    CLI Example:
        salt '*' sesceph.keyring_present_type \\
                'keyring_type'='admin' \\
                'cluster_name'='ceph' \\
                'cluster_uuid'='cluster_uuid'
    Notes:
    cluster_uuid
        Set the cluster UUID. Defaults to value found in ceph config file.
    cluster_name
        Set the cluster name. Defaults to "ceph".
    keyring_type
        Set the keyring type.
    """
    keyring_type = kwargs.get("keyring_type")
    if keyring_type is None:
        raise Error("keyring_type is None")
    m = model.model(**kwargs)
    u = mdl_updater.model_updater(m)
    u.hostname_refresh()
    try:
        u.defaults_refresh()
    except Exception:
        # Defaults may not be readable yet; continue with what we have.
        pass
    keyobj = keyring.keyring_facard(m)
    keyobj.key_type = keyring_type
    return keyobj.present()
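Several sesceph examples on this page repeat the same opening pattern: build a model from the call's kwargs, then run updaters that populate it in place. A minimal sketch of that flow, assuming the sesceph modules are importable and using illustrative kwargs:

m = model.model(cluster_name="ceph")  # container for discovered cluster state
u = mdl_updater.model_updater(m)      # populates the model in place
u.hostname_refresh()                  # fill in the local hostname
u.defaults_refresh()                  # read defaults from the ceph config file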
Example 4: build_agg_snapshots
def build_agg_snapshots(self, db='./database'):
    conn = sqlite3.connect(db)
    c = conn.cursor()
    # Force a model rebuild if it was created within the last n days.
    created_within = 7
    # Select models which require rebuilding.
    cmd = c.execute('select mid, uid, version, created_on, last_updated_on, agg_fx, granularity \
                     from model \
                     where julianday(current_date) - julianday(last_updated_on) >= expires_in or \
                           rebuild = 1 and julianday(current_date) - julianday(last_updated_on) >= earliest_rebuild or \
                           julianday(current_date) - julianday(created_on) <= ?', (created_within,))
    r = cmd.fetchall()
    for m in r:
        mdl = model(m[0], m[2], m[6])
        # Update the model and force deletion of the existing aggregate snapshot.
        c.execute('delete from agg_location_snapshot where mid = ?', (mdl.id(),))
        mdl.inc_version()
        c.execute('update model \
                   set version = ?, last_updated_on = current_date, \
                       nr_matches = 0, nr_successful = 0, streak = 0, last10 = 0 \
                   where mid = ?', (mdl.version(), mdl.id()))
        # Rebuild the aggregate snapshot.
        self.__build_agg_snapshot(mdl, c)
    c.close()
    conn.commit()
    conn.close()
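The query implies a model table that carries its own rebuild bookkeeping. The schema is not part of this snippet; a compatible table might be declared roughly as follows (column types are guesses read off the queries above):

import sqlite3

conn = sqlite3.connect('./database')
conn.execute('''create table if not exists model (
    mid integer primary key, uid integer, version integer,
    created_on date, last_updated_on date,
    agg_fx text, granularity text,
    expires_in integer, earliest_rebuild integer, rebuild integer,
    nr_matches integer, nr_successful integer, streak integer, last10 integer)''')
conn.commit()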
Example 5: pool_del
def pool_del(pool_name, **kwargs):
    """
    Delete the named pool from the cluster.
    CLI Example:
        salt '*' sesceph.pool_del pool_name \\
                'cluster_name'='ceph' \\
                'cluster_uuid'='cluster_uuid'
    Notes:
    cluster_name
        Set the cluster name. Defaults to "ceph".
    cluster_uuid
        Set the cluster UUID. Defaults to value found in ceph config file.
    """
    m = model.model(**kwargs)
    u = mdl_updater.model_updater(m)
    u.hostname_refresh()
    u.defaults_refresh()
    u.load_confg(m.cluster_name)
    u.mon_members_refresh()
    mur = mdl_updater_remote.model_updater_remote(m)
    can_connect = mur.connect()
    if not can_connect:
        raise Error("Can't connect to cluster.")
    mur.pool_list()
    return mur.pool_del(pool_name)
Example 6: keyring_auth_list
def keyring_auth_list(**kwargs):
    """
    List all cephx authorization keys.
    CLI Example:
        salt '*' sesceph.auth_list \\
                'cluster_name'='ceph' \\
                'cluster_uuid'='cluster_uuid'
    Notes:
    cluster_name
        Set the cluster name. Defaults to "ceph".
    cluster_uuid
        Set the cluster UUID. Defaults to value found in ceph config file.
    """
    m = model.model(**kwargs)
    u = mdl_updater.model_updater(m)
    u.hostname_refresh()
    try:
        u.defaults_refresh()
    except Exception:
        # Without defaults there is no cluster to query.
        return {}
    u.load_confg(m.cluster_name)
    u.mon_members_refresh()
    u.auth_list()
    p = presenter.mdl_presentor(m)
    return p.auth_list()
Example 7: lnlikeHFM
def lnlikeHFM(pars, samples, obs, u, extra=False):
    '''
    Generic likelihood function for importance sampling with any number of
    dimensions.
    Now with an added (hierarchical) jitter parameter.
    obs should be a 2d array of observations. shape = (ndims, nobs)
    u should be a 2d array of uncertainties. shape = (ndims, nobs)
    samples is a 3d array of samples. shape = (ndims, nobs, nsamp)
    If extra == True, sigma has both a slope and an intercept.
    Now with a mixture model!
    '''
    ndims, nobs, nsamp = samples.shape
    ypred = model(pars, samples)
    yobs = obs[1, :]
    xobs = obs[0, :]
    yerr = u[1, :]
    ll1 = np.zeros((nobs, nsamp*nobs))
    ll2 = np.zeros((nobs, nsamp*nobs))
    Y, V, P = pars[3], pars[4], pars[5]
    for i in range(nobs):
        if extra:
            inv_sigma2 = 1.0/(yerr[i]**2 +
                              (pars[2] + pars[3] * model1(pars, xobs[i]))**2 + V)
        else:
            inv_sigma2 = 1.0/(yerr[i]**2 +
                              (pars[2]*model1(pars, xobs[i]))**2 + V)
        ll1[i, :] = -.5*((yobs[i] - ypred)**2*inv_sigma2) + np.log(inv_sigma2)
        ll2[i, :] = -.5*((yobs[i] - Y)**2*inv_sigma2) + np.log(inv_sigma2)
    lnlike1 = np.logaddexp.reduce(ll1, axis=1)
    lnlike2 = np.logaddexp.reduce(ll2, axis=1)
    loglike = np.sum(np.logaddexp(np.log(1-P) + lnlike1, np.log(P) + lnlike2))
    if np.isfinite(loglike):
        return loglike
    return -np.inf
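The mixture combines the inlier likelihood with an outlier component centred on Y (with extra variance V), weighted by the outlier probability P; np.logaddexp performs the sum in log space so tiny likelihoods do not underflow. A tiny standalone illustration with made-up numbers:

import numpy as np

lnlike_in, lnlike_out = -3.2, -8.0   # per-point log-likelihoods (made up)
P = 0.1                              # assumed outlier probability
lnlike = np.logaddexp(np.log(1 - P) + lnlike_in, np.log(P) + lnlike_out)
# equivalent to log((1-P)*exp(lnlike_in) + P*exp(lnlike_out)), but stable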
Example 8: __raxml_to_model
def __raxml_to_model(self, model_name, param_names, args, s_size):
    raxmlmodelstr = self.__model_to_raxmodelstr(model_name, param_names)
    # TODO: consider catching RAxML crashes here and continuing.
    rax_cmd = Popen('{raxpath} -f e -m {modelstr} -s {phy_file} -t {starting_tree} -n {output_names} -w {out_path}/{model_name}'.format(
                        raxpath=self.raxml_path,
                        modelstr=raxmlmodelstr,
                        output_names=param_names,
                        out_path=self.output,
                        model_name=model_name,
                        phy_file=self.ifile_name,
                        starting_tree=self.stree),
                    shell=True,
                    stdout=PIPE,
                    stderr=PIPE)
    rax_out = rax_cmd.communicate()  # (stdout, stderr)
    full_name = model_name + param_names
    self.raxstdout += '='*80 + '\n' + full_name.center(80) + '\n' + '='*80 + '\n'
    self.raxstdout += rax_out[0]
    self.raxstderr += '='*80 + '\n' + full_name.center(80) + '\n' + '='*80 + '\n'
    self.raxstderr += rax_out[1]  # captured stderr
    new_model = model(model_name=model_name,
                      param_names=param_names,
                      rax_name=raxmlmodelstr,
                      args=self.args,
                      s_size=s_size)
    return new_model
Example 9: main
def main(hps):
    # Initialize Horovod.
    hvd.init()
    # Create tensorflow session
    sess = tensorflow_session()
    # Seed the RNGs differently on each Horovod rank.
    tf.set_random_seed(hvd.rank() + hvd.size() * hps.seed)
    np.random.seed(hvd.rank() + hvd.size() * hps.seed)
    # Get data and set train_its and valid_its
    train_iterator, test_iterator, data_init = get_data(hps, sess)
    hps.train_its, hps.test_its, hps.full_test_its = get_its(hps)
    # Create log dir
    logdir = os.path.abspath(hps.logdir) + "/"
    if not os.path.exists(logdir):
        os.mkdir(logdir)
    # Create model. Note: this rebinds the name `model` from the module to the instance.
    import model
    model = model.model(sess, hps, train_iterator, test_iterator, data_init)
    # Initialize visualization functions
    visualise = init_visualizations(hps, model, logdir)
    if not hps.inference:
        # Perform training
        train(sess, model, hps, logdir, visualise)
    else:
        infer(sess, model, hps, test_iterator)
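Rebinding the module name to the instance works here only because the module is not needed again; an alias keeps both in scope. A small variant of the two lines above:

import model as model_module

mdl = model_module.model(sess, hps, train_iterator, test_iterator, data_init)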
Example 10: __init__
def __init__(self):
    self.M = model()
    self.V = view()
    self.M.setPaintCallback(self.Paint)
    self.M.setMessageCallback(self.systemMessage)
    self.mkLoad()
    self.mkSelect()
    self.mkFunction()
    self.mkDatabus()
    self.mkAddressbus()
    self.mkMemory()
    self.mkPGM()
    self.mkProgramControls()
    self.mkHALT()
    self.Paint()
    m = "\n\n WELCOME TO VIRTUAL MACHINE\n"
    m = m + " Memory: " + str(len(self.M.MEMORY.values())) + " bytes\n"
    m = m + " ==========================================================\n"
    m = m + "Press README on left...."
    self.systemMessage(m)
    self.V.mainloop()
Example 11: cluster_quorum
def cluster_quorum(**kwargs):
    """
    Get the cluster quorum status.
    CLI Example:
        salt '*' sesceph.cluster_quorum \\
                'cluster_name'='ceph' \\
                'cluster_uuid'='cluster_uuid'
    Scope:
    Cluster wide
    Arguments:
    cluster_uuid
        Set the cluster UUID. Defaults to value found in ceph config file.
    cluster_name
        Set the cluster name. Defaults to "ceph".
    """
    m = model.model(**kwargs)
    u = mdl_updater.model_updater(m)
    u.hostname_refresh()
    u.defaults_refresh()
    u.load_confg(m.cluster_name)
    u.mon_members_refresh()
    mur = mdl_updater_remote.model_updater_remote(m)
    can_connect = mur.connect()
    if not can_connect:
        return False
    q = mdl_query.mdl_query(m)
    return q.cluster_quorum()
Example 12: main
def main(weights_path, base_path, base_file, style_path, style_file,
         combo_path, img_width, img_height, iterations):
    result_prefix = base_file[:-4] + '_' + style_file[:-4]
    base_img_path = base_path + base_file
    style_img_path = style_path + style_file
    # Get tensor representations of the images.
    base_img = K.variable(preprocess_image(base_img_path,
                                           img_width,
                                           img_height))
    style_img = K.variable(preprocess_image(style_img_path,
                                            img_width,
                                            img_height))
    combo_img = K.placeholder((1, 3, img_width, img_height))
    # Combine the 3 images into a single Keras tensor.
    input_tensor = K.concatenate([base_img, style_img, combo_img],
                                 axis=0)
    print('Creating painting of {} in the style of {}'.format(base_file[:-4],
                                                              style_file[:-4]))
    print('Loading model with VGG16 network weights...')
    nn = model(weights_path, input_tensor, img_width, img_height)
    loss, grads = calc_loss_grad(nn, combo_img, img_width, img_height)
    evaluate = Evaluator(loss, grads, combo_img, img_width, img_height)
    return optimizer(evaluate, img_width, img_height, combo_path,
                     result_prefix, iterations=iterations)
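A hedged invocation sketch: the paths, file names, and dimensions below are placeholders, and preprocess_image, calc_loss_grad, Evaluator, and optimizer are assumed to be defined elsewhere in the same script:

main(weights_path='vgg16_weights.h5',
     base_path='images/', base_file='photo.jpg',
     style_path='styles/', style_file='starry_night.jpg',
     combo_path='output/', img_width=512, img_height=512,
     iterations=10)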
Example 13: main
def main():
    sample_size = int(sys.argv[1])
    train = data_io.read_train()
    print("Data Size:")
    print(train.shape)
    feature_eng(train)
    # Originally sample_size = 100000.
    train_sample = train[:sample_size]
    # Train the booking and click models.
    for i in range(0, 2):
        if i == 0:
            model_name = "Booking"
            response_name = "booking_bool"
            isBook = True
        else:
            model_name = "Click"
            response_name = "click_bool"
            isBook = False
        print("Training the " + model_name + " Classifier...")
        tstart = datetime.now()
        feature_names = get_features(train_sample, isBook)
        print("Using " + str(len(feature_names)) + " features...")
        features = train_sample[feature_names].values
        target = train_sample[response_name].values
        classifier = model.model()
        classifier.fit(features, target)
        # Print the elapsed time.
        print("Time used,")
        print(datetime.now() - tstart)
        print("Saving the classifier...")
        tstart = datetime.now()
        data_io.save_model(classifier, isBook)
        print("Time used,")
        print(datetime.now() - tstart)
Example 14: __init__
def __init__(self, **kwargs):
    self.model = model.model(**kwargs)
    self._clear_implementation()
    u = mdl_updater.model_updater(self.model)
    u.ceph_version_refresh()
    q = mdl_query.mdl_query(self.model)
    self.ceph_daemon_user = q.ceph_daemon_user()
Example 15: evaluate
def evaluate(image_path, label_path):
    """Loads the network, reads an image, and returns the IOU."""
    # Load image and label
    image = load_image(image_path)
    label = scipy.misc.imread(label_path)
    # Define the model (dropout disabled for evaluation)
    prediction = model.model(image, drop=False)
    # Get a saver
    saver = tf.train.Saver()
    # Launch the graph
    with tf.Session() as sess:
        # Restore variables (checkpoint_dir is assumed to be defined at module level)
        checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
        saver.restore(sess, checkpoint_path)
        model.log("Variables restored from:", checkpoint_path)
        logits = prediction.eval()
        segmentation = post(logits, label, threshold=-1)
        iou = IOU(segmentation, label)
        print("iou =", iou)
        return iou
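post and IOU are project helpers that are not shown here. For binary masks, an IOU of the kind reported above can be computed in a few lines; a sketch, assuming numpy arrays:

import numpy as np

def iou_binary(pred, truth):
    """Intersection-over-union for two boolean masks."""
    pred, truth = pred.astype(bool), truth.astype(bool)
    intersection = np.logical_and(pred, truth).sum()
    union = np.logical_or(pred, truth).sum()
    return intersection / union if union else 0.0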