This article collects typical usage examples of the util.load function in Python. If you have been wondering what util.load does, how to call it, or what real-world uses look like, the hand-picked examples below should help.
15 code examples of load are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
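A caveat before the examples: util is a project-local helper module, not part of the standard library, so load means different things below. In Examples 1 and 2 it reads a text file by key; in Examples 4, 8, 11, 12, and 13 it unpickles a saved object; in Examples 5, 7, 9, and 10 it reads a CSV into a pandas DataFrame. As a minimal sketch of the pickle flavor, matching the save(data, name)/load(name) round-trip in Example 12 (the file handling here is an assumption, not any project's actual code):

import pickle

def save(data, name):
    # Serialize an arbitrary Python object to disk.
    with open(name, "wb") as f:
        pickle.dump(data, f)

def load(name):
    # Inverse of save(): read the pickled object back.
    with open(name, "rb") as f:
        return pickle.load(f)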
Example 1: codeFile
def codeFile(args, flag, data):
    PARAM_KEY = 1
    PARAM_FILE = 2  # Output file location
    PARAM_FORMATTER = 3
    ARGUMENTS = len(args) - 1
    # Ability to add a block of code through copy and paste and have it formatted correctly!
    if keyExists("files", args[PARAM_KEY]):
        _file = json.loads(load("files/" + args[PARAM_KEY]))
        out = ''
        # loadJSON
        for x in _file:
            block = str(load("blocks/" + x))
            if ARGUMENTS == PARAM_FORMATTER:  # Alter all the blocks in said fashion
                block = format.block(block, args[PARAM_FORMATTER])
            out += block
            out += "\n"  # Adds some spacing between blocks
        # No file specified
        if len(args) < 3:
            log(out)
        else:
            log("Saving to file " + args[PARAM_FILE])
            save(args[PARAM_FILE], out)
    else:
        error("Error: File does not exist")
Example 2: codeProject
def codeProject(args, flag, data):
    PARAM_KEY = 1
    PARAM_PATH = 2
    PARAM_FORMATTER = 3
    ARGUMENTS = len(args) - 1
    # JSON mapping files and storage of this
    if keyExists("projects", args[PARAM_KEY]):
        if "stdout" in args[PARAM_PATH]:
            project = json.loads(load("projects/" + args[PARAM_KEY]))  # Uses key-value storage
            directory = args[PARAM_PATH] + "/" + args[PARAM_KEY]
            mkdir(directory)
            for x in project.keys():  # each project maps file keys to output file names
                _file = json.loads(load("files/" + x))
                out = ''
                for y in _file:
                    block = str(load("blocks/" + y))
                    if ARGUMENTS == PARAM_FORMATTER:  # Alter all the blocks in said fashion
                        block = format.block(block, args[PARAM_FORMATTER])
                    out += block
                # Output the file with the correct file name
                save(directory + "/" + project[x], out)
    else:
        error("Error: Project does not exist")
Example 3: _buildmeta
def _buildmeta(ui, repo, args, partial=False, skipuuid=False):
    if repo is None:
        raise error.RepoError("There is no Mercurial repository"
                              " here (.hg not found)")

    dest = None
    validateuuid = False
    if len(args) == 1:
        dest = args[0]
        validateuuid = True
    elif len(args) > 1:
        raise hgutil.Abort('rebuildmeta takes 1 or no arguments')
    url = repo.ui.expandpath(dest or repo.ui.config('paths', 'default-push') or
                             repo.ui.config('paths', 'default') or '')

    meta = svnmeta.SVNMeta(repo, skiperrorcheck=True)

    svn = None
    if meta.subdir is None:
        svn = svnrepo.svnremoterepo(ui, url).svn
        meta.subdir = svn.subdir

    youngest = 0
    startrev = 0
    sofar = []
    branchinfo = {}
    if partial:
        try:
            # we can't use meta.lastpulled here because we are bootstrapping
            # the lastpulled and want to keep the cached value on disk during
            # a partial rebuild
            foundpartialinfo = False
            youngestpath = os.path.join(meta.metapath, 'lastpulled')
            if os.path.exists(youngestpath):
                youngest = util.load(youngestpath)
                sofar = list(maps.RevMap.readmapfile(meta.revmap_file))
                if sofar and len(sofar[-1].split(' ', 2)) > 1:
                    lasthash = sofar[-1].split(' ', 2)[1]
                    startrev = repo[lasthash].rev() + 1
                    branchinfo = util.load(meta.branch_info_file)
                    foundpartialinfo = True
            if not foundpartialinfo:
                ui.status('missing some metadata -- doing a full rebuild\n')
                partial = False
        except IOError, err:
            if err.errno != errno.ENOENT:
                raise
            ui.status('missing some metadata -- doing a full rebuild\n')
        except AttributeError:
            ui.status('no metadata available -- doing a full rebuild\n')
Example 4: MDSPlotTest
def MDSPlotTest():
    import json
    import experiment
    resPath = "../experiments/ebook_color_pca_3"
    experiment.experimentCase("../params/ebook_color_pca_28x28_3.json", resPath)
    info = json.loads(util.fileString("../params/ebook_color_pca_28x28_3.json"))
    info = util.dotdict(info)
    x = util.load(resPath + "/x.pkl")
    print x.dtype
    compressed = util.load(resPath + "/compressed.pkl")
    MDSPlots(x, compressed, info.dataSet.shape)
    import matplotlib.pyplot as plt
    # The original called fig.savefig() with fig never defined and no path;
    # grabbing the current figure and an assumed output path makes it runnable.
    fig = plt.gcf()
    fig.savefig(resPath + "/mds.png")
    print("show figure")
    plt.show()
Example 5: main
def main():
    # establish postgresql connection
    con = psycopg2.connect(database='mimic', user='mimic',
                           host='localhost', password='mimic')
    # 1. extract and export list of adults
    adults = extract_adults(con)
    adults.to_csv("lists/adults_admitted.csv", index=False,
                  columns=['subject_id', 'combined_dod', 'outtime', 'age'])
    # ----
    # 2. extract patients with cardiovascular conditions
    adults = load("lists/adults_admitted.csv")
    adults_list = set(adults['subject_id'].tolist())
    heart_patients = generate_heart_patients(con, adults_list)
    heart_patients.to_csv("lists/heart_patients.csv", index=False, header=True)
    adults_heart = pd.merge(adults, heart_patients,
                            on=['subject_id'], how='inner')
    # 3. then trim down the list to patients who were discharged alive
    discharged = lived(adults_heart)
    discharged.to_csv("lists/adults_heart_discharged.csv", index=False)
    # discharged = load("lists/adults_heart_discharged.csv")
    # 4. from those discharged patients, find the ones with a second
    # admission and calculate the date difference between first and second
    readmitted = readmission_diff(con, discharged['subject_id'].tolist())
    readmitted.to_csv("lists/readmission_diff.csv", index=False)
    # 5. generate ICU IDs of first visits (could be used for awk)
    generate_icu_id(con, "lists/readmission_diff.csv",
                    "lists/first_icu_list.txt")
Example 6: sgd_optimize
def sgd_optimize(learning_rate=0.1,
                 pretrain_learning_rate=0.001,
                 pretrain_epochs=15,
                 finetune_epochs=1000,
                 batch_size=20):
    # Load datasets
    train, valid, test = util.load()
    print "loading 0 - ", train[0].shape[0], " train inputs in gpu memory"
    train_x, train_y = util.create_theano_shared(train)
    print "loading 0 - ", valid[0].shape[0], " validation inputs in gpu memory"
    valid_x, valid_y = util.create_theano_shared(valid)
    print "loading 0 - ", test[0].shape[0], " test inputs in gpu memory"
    test_x, test_y = util.create_theano_shared(test)

    n_train_batches = train[0].shape[0] / batch_size
    n_valid_batches = valid[0].shape[0] / batch_size
    n_test_batches = test[0].shape[0] / batch_size

    random_generator = numpy.random.RandomState(1)
    print "...Building model"
    sd = StackedDenoisingAutoEncoders(random_generator,
                                      hidden_layer_sizes=[1000, 1000, 1000])
    print "...Getting pretrain functions"
    pretrain_fns = sd.pretrain(train_x, batch_size)

    #############
    # Pretrain  #
    #############
    print "... Pre-training model"
    start_time = time.clock()
    # Pre-train layer-wise
    corruption_levels = [.1, .2, .3]
    for i in range(sd.n_layers):
        for epoch in range(pretrain_epochs):
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretrain_fns[i](index=batch_index,
                                         corruption_level=corruption_levels[i],
                                         learning_rate=pretrain_learning_rate))
            print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
            print numpy.mean(c)
    end_time = time.clock()
    # convert seconds to minutes; the original omitted the division
    print "Pretraining code ran for %.2fm" % ((end_time - start_time) / 60.)

    #############
    # Finetune  #
    #############
    print "...Fine-tuning model"
    train_model, valid_model, test_model = sd.finetune(train_x, train_y,
                                                       valid_x, valid_y,
                                                       test_x, test_y,
                                                       batch_size, learning_rate)
    util.train_test_model(finetune_epochs, train_model, valid_model, test_model,
                          n_train_batches, n_valid_batches, n_test_batches)
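Here util.load takes no arguments and returns three (inputs, labels) splits, which matches the MNIST pickle layout used by the Theano deep-learning tutorials this code follows. A sketch under that assumption (the mnist.pkl.gz path and format are guesses, not the project's confirmed loader):

import gzip
import pickle

def load(path="mnist.pkl.gz"):
    # Each split is an (inputs, labels) pair of numpy arrays.
    with gzip.open(path, "rb") as f:
        train_set, valid_set, test_set = pickle.load(f)
    return train_set, valid_set, test_set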
Example 7: generate_features
def generate_features(provided_list=None):
    con = psycopg2.connect(database='mimic', user='mimic', host='localhost',
                           password='mimic')
    # load list of first hadm_ids
    first_icu = load("lists/adults_heart_discharged.csv",
                     cols=['subject_id', 'hadm_id', 'icustay_id']).\
        sort_values(by=['subject_id'])
    # if a patient list is provided, then only look at those patients
    if provided_list is not None:
        first_icu = first_icu[first_icu['subject_id'].isin(provided_list)]
    hids = set(first_icu['hadm_id'].tolist())
    icu_list = first_icu['icustay_id'].tolist()
    p_list = first_icu['subject_id'].tolist()
    patients = first_icu['subject_id']
    # get comorbidity scores
    comorb = comorb_scores(con, hids)
    # get oasis scores etc
    phys_scores = generate_phys_scores(con, icu_list, p_list)
    combined_scores = np.concatenate((np.asmatrix(comorb),
                                      phys_scores), axis=1)
    np.savetxt("X.csv", combined_scores, delimiter=",")
    return patients, combined_scores
Example 8: test_tour
def test_tour(par=1):
    ip.reset(par)
    scene = 'disrupt-11'
    scan = dset.Scan('../data/%s' % scene)
    texel_colors = ut.load(ut.pjoin(figures.make_path('noloo', 'interior-wide', scene),
                                    'data.pk'))['ret'][0]
    mesh = box.load_from_mat(ut.pjoin(scan.path, 'cube.mat'))
    tour(scan, mesh, texel_colors, [0, 1, 2], plane_idx=0, outline_start=0, par=par)
Example 9: retrieve_notes_sql
def retrieve_notes_sql():
    con = psycopg2.connect(database='mimic', user='mimic', host='localhost',
                           password='mimic')
    # load list of first ICU stays
    icu_list = load("notes/adults_heart_discharged.csv",
                    cols=['subject_id', 'hadm_id'])
    adm_id = icu_list['hadm_id'].tolist()
    # build an IN (...) clause from the admission ids
    query_list = "(" + str(adm_id)[1:-1] + ")"
    # get all relevant notes during the first stay
    sql_query = """
    SELECT subject_id, hadm_id, chartdate, charttime, text
    FROM mimiciii.noteevents
    WHERE (hadm_id IN %s AND category != 'Discharge summary')
    ORDER BY subject_id, chartdate, charttime
    ;
    """ % query_list
    notes = pd.read_sql_query(sql_query, con)
    notes.to_csv("notes/notes.csv", index=False)
    # print notes.shape
    return True
Example 10: get_patient_scores
def get_patient_scores(pid):
    # initialize postgres connection
    con = psycopg2.connect(database='mimic',
                           user='mimic', host='localhost',
                           password='mimic')
    stay_ids = load("lists/adults_heart_discharged.csv",
                    cols=['icustay_id'])['icustay_id'].tolist()
    # retrieve all relevant scores from the database
    sql_query = """
    SELECT * FROM mimiciii.oasis
    WHERE subject_id = %d
    ;
    """ % (pid)
    oasis = pd.read_sql_query(sql_query, con)
    oasis = oasis[oasis['icustay_id'].isin(stay_ids)]
    sql_query = """
    SELECT * FROM mimiciii.sofa
    WHERE subject_id = %d
    ;
    """ % (pid)
    sofa = pd.read_sql_query(sql_query, con)
    # the original refiltered oasis here by copy-paste mistake, leaving sofa unfiltered
    sofa = sofa[sofa['icustay_id'].isin(stay_ids)]
    sql_query = """
    SELECT * FROM mimiciii.sapsii
    WHERE subject_id = %d
    ;
    """ % (pid)
    sapsii = pd.read_sql_query(sql_query, con)
    sapsii = sapsii[sapsii['icustay_id'].isin(stay_ids)]
    sql_query = """
    SELECT * FROM mimiciii.sapsii_last
    WHERE subject_id = %d
    ;
    """ % (pid)
    sapsii_last = pd.read_sql_query(sql_query, con)
    sapsii_last = sapsii_last[sapsii_last['icustay_id'].isin(stay_ids)]
    # combine all scores
    data = [oasis.iloc[0][2], sofa.iloc[0][2],
            sapsii.iloc[0][2], sapsii_last.iloc[0][2]]
    # TODO: make a plot and display in html
    '''
    #barplot=ax.bar([0,1,2,3],data,0.6,color=['grey','white','grey','white'])
    #names = ax.set_xticklabels(['severity illness score',
    #                            'organ failure assessment',
    #                            'acute physiology score',
    #                            'acute physiology score(last)'])
    #ax.set_xticks([0,1,2,3])
    #ax.set_xlim(-0.3,3.8)
    #plt.gcf().subplots_adjust(bottom=0.25)
    #plt.setp(names,rotation=30,fontsize=13)
    #savefig("predict/fig.png")
    '''
    return str(data)[1:-1]
Example 11: saveModelImages
def saveModelImages(modelPath, dstPath, color=False):
    info, sda = util.load(modelPath)
    import train
    x = train.createDataSet(info["dataSet"]).get_value(borrow=True)
    for name, img in createSdaImages(sda, x, color):
        dst = dstPath + "/" + name
        util.ensurePathExists(dst)
        img.save(dst)
Example 12: saveTest
def saveTest():
    data = [0, 1, 2, 3, 4]
    name = "test.pkl"
    util.save(data, name)
    data2 = util.load(name)
    print data, data2
    return data == data2
Example 13: read_test_data
def read_test_data(file):
    # assume if one is saved they all are
    if util.check_file_exists(CONST.DATASET_PATH + CONST.TEST_PATH):
        T_Data = util.load(CONST.DATASET_PATH + CONST.TEST_PATH)
        T_Labels = util.load(CONST.DATASET_PATH + CONST.TEST_PATH_LABELS)
        T_Queries = util.load(CONST.DATASET_PATH + CONST.TEST_PATH_Q)
        T_Docs = util.load(CONST.DATASET_PATH + CONST.TEST_PATH_DOCS)
    else:
        T_Data, T_Labels, T_Queries, T_Docs = read_train_data(file)
        util.save_pickle(CONST.DATASET_PATH + CONST.TEST_PATH, T_Data)
        util.save_pickle(CONST.DATASET_PATH + CONST.TEST_PATH_LABELS, T_Labels)
        util.save_pickle(CONST.DATASET_PATH + CONST.TEST_PATH_Q, T_Queries)
        util.save_pickle(CONST.DATASET_PATH + CONST.TEST_PATH_DOCS, T_Docs)
    return T_Data, T_Labels, T_Queries, T_Docs
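Note that this project's util.save_pickle takes (path, data), while Example 12's util.save takes (data, name); the helpers clearly come from different codebases. A minimal sketch of save_pickle under that assumption:

import pickle

def save_pickle(path, data):
    # Counterpart to load(): pickle data to path.
    with open(path, "wb") as f:
        pickle.dump(data, f)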
Example 14: get_resource
def get_resource(resource_name):
    # the original omitted this declaration, so the += raised UnboundLocalError
    global RESOURCE_COUNTER
    if resource_name in RESOURCE_TO_ID:
        return DATA_MAP[RESOURCE_TO_ID[resource_name]]
    resource_data = load(resource_name)
    resource_id = RESOURCE_COUNTER
    RESOURCE_COUNTER += 1
    RESOURCE_TO_ID[resource_name] = resource_id
    DATA_MAP[resource_id] = resource_data
    return resource_data
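A short usage sketch: get_resource memoizes whatever load returns, so repeated lookups of the same name are served from memory. The module-level caches and the resource name below are assumptions for illustration:

RESOURCE_TO_ID = {}
DATA_MAP = {}
RESOURCE_COUNTER = 0

first = get_resource("maps/level1.dat")   # hypothetical name; calls load() once
again = get_resource("maps/level1.dat")   # cache hit: served from DATA_MAP
assert first is again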
Example 15: predict
def predict(data_json, model_path):
    preproc = util.load(os.path.dirname(model_path))
    dataset = load.load_dataset(data_json)
    x, y = preproc.process(*dataset)
    model = keras.models.load_model(model_path)
    probs = model.predict(x, verbose=1)
    return probs
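A hedged call sketch with invented paths, just to show the expected inputs (a dataset JSON plus a saved Keras model whose preprocessor was serialized in the same directory):

probs = predict("data/validation.json", "saved/model.hdf5")  # paths are hypothetical
print(probs.shape)  # typically (num_examples, num_classes)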