This page collects typical usage examples of the load method of Python's model.Model class. If you have been wondering what Model.load does, how to call it, or where to find working examples, the curated samples below should help. You can also explore the containing class model.Model for more context.
Below are 15 code examples of Model.load, sorted by popularity by default. Upvote the examples you find useful; your feedback helps the system surface better Python samples.
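Taken together, the examples show two distinct shapes of Model.load: an instance method that populates a freshly constructed Model, and a classmethod factory that returns the loaded instance. A minimal sketch of both call patterns (the module path and file names here are illustrative, not from any one project):

from model import Model

# Pattern 1: instance method on a fresh Model; several projects
# (examples 7, 9, 12, 14, 15) return a success flag you can assert on.
model = Model()
assert model.load('testdata/example_model')  # illustrative path

# Pattern 2: classmethod factory returning the loaded instance
# (examples 5, 6, 8, 10, 11, 13).
model = Model.load('testdata/example_model')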
Example 1: load
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import os

def load(filename):
    # Resolve the test file relative to this module's directory.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    path = os.path.join(dir_path, '..', 'testdata', filename)
    model = Model()
    model.load(path)
    return model
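A hypothetical call to this helper, assuming the ../testdata directory contains the same test3.lang file that example 9 loads:

model = load('test3.lang')  # resolves relative to this file's directory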
Example 2: test_save_and_load
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import os

def test_save_and_load(self):
    model_dir = '../testdata/lda_model'
    self.model.save(model_dir)
    self.assertTrue(os.path.exists(model_dir))

    new_model = Model(20)
    new_model.load(model_dir)
    self.assertEqual(new_model.num_topics, self.model.num_topics)
    self.assertEqual(len(new_model.word_topic_hist),
                     len(self.model.word_topic_hist))

    # Python 2 snippet (iteritems/xrange): compare every word's sparse
    # topic histogram between the saved and reloaded models.
    for word, new_sparse_topic_hist in new_model.word_topic_hist.iteritems():
        self.assertTrue(word in self.model.word_topic_hist)
        sparse_topic_hist = self.model.word_topic_hist[word]
        self.assertEqual(new_sparse_topic_hist.size(),
                         sparse_topic_hist.size())
        for j in xrange(new_sparse_topic_hist.size()):
            self.assertEqual(new_sparse_topic_hist.non_zeros[j].topic,
                             sparse_topic_hist.non_zeros[j].topic)
            self.assertEqual(new_sparse_topic_hist.non_zeros[j].count,
                             sparse_topic_hist.non_zeros[j].count)

    self.assertEqual(new_model.hyper_params.topic_prior,
                     self.model.hyper_params.topic_prior)
    self.assertEqual(new_model.hyper_params.word_prior,
                     self.model.hyper_params.word_prior)
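The method above clearly lives in a unittest.TestCase whose setUp builds self.model; the harness below is a sketch under that assumption (the setUp body is hypothetical; the original project presumably trains or populates the 20-topic model first):

import unittest
from model import Model

class ModelSaveLoadTest(unittest.TestCase):
    def setUp(self):
        # Hypothetical: the real suite builds a trained 20-topic model here.
        self.model = Model(20)

if __name__ == '__main__':
    unittest.main()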
Example 3: Test
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
def Test():
    import time
    import pyGuiWrapper as gui
    global model
    model = Model()
    model.load("testModel.xml")
    gui.go(lambda parent, menu, tool, status, m=model: Panel(parent, menu, tool, status, m))
Example 4: sort_all
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import os

def sort_all(lang):
    model_dir = os.path.join('progress', lang)
    model = Model()
    ff = model.all_files(model_dir)
    fname = ff[2]
    # Derive the output name by appending '_s' before the extension.
    p = fname.split('.')
    fname1 = p[0] + '_s' + '.' + p[1]
    print fname
    print fname1
    model.load(fname)
    model.short_ignore = ['une', 'un']
    model.sort()
    model.save(fname1)
Example 5: loadFromCheckpoint
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
def loadFromCheckpoint(savedModelDir):
    """ Load saved model.

    @param savedModelDir (string)
           Directory where the experiment is to be or was saved
    @returns (nupic.frameworks.opf.model.Model) The loaded model instance.
    """
    return Model.load(savedModelDir)
Example 6: process
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import json
import os

import nltk

# request/render_template are Flask imports in the surrounding app;
# models_directory, libml, and Model are assumed module-level names.
def process():
    data = request.form['input']
    data = nltk.sent_tokenize(data)
    data = map(nltk.word_tokenize, data)

    properties = request.form['model']
    properties = json.loads(properties)

    model = Model.load(os.path.join(models_directory, properties["name"], "model"))
    labels = model.predict(data)

    output = None
    if properties["type"] == "svm":
        output = labels[libml.SVM]
    elif properties["type"] == "crf":
        output = labels[libml.CRF]
    elif properties["type"] == "lin":
        output = labels[libml.LIN]

    # Flatten the per-sentence lists and pair each token with its label.
    output = sum(output, [])
    data = sum(data, [])
    output = zip(data, output)
    return render_template("result.html", input=request.form["input"],
                           model=properties["name"] + " - " + properties["type"].upper(),
                           output=output)
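The sum(list_of_lists, []) idiom used twice above flattens the per-sentence token and label lists before they are zipped together; in isolation:

sentences = [['The', 'cat'], ['sat']]
tags = [['DT', 'NN'], ['VBD']]
flat_tokens = sum(sentences, [])  # ['The', 'cat', 'sat']
flat_tags = sum(tags, [])         # ['DT', 'NN', 'VBD']
pairs = zip(flat_tokens, flat_tags)  # [('The', 'DT'), ('cat', 'NN'), ('sat', 'VBD')] in Python 2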
Example 7: plot_clusters
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import chainer
import numpy as np
import pylab

def plot_clusters():
    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets
    dataset_indices = np.arange(0, len(images_test))
    np.random.shuffle(dataset_indices)

    model = Model()
    assert model.load("model.hdf5")

    # normalize pixels from [0, 1] to [-1, 1]
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    num_clusters = model.ndim_y
    num_plots_per_cluster = 11
    image_width = 28
    image_height = 28
    ndim_x = image_width * image_height
    pylab.gray()

    # A comma (not `and`) is required to enter both context managers;
    # `A() and B()` would enter only one of them.
    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        # plot cluster heads
        head_y = np.identity(model.ndim_y, dtype=np.float32)
        zero_z = np.zeros((model.ndim_y, model.ndim_z), dtype=np.float32)
        head_x = model.decode_yz_x(head_y, zero_z).data
        head_x = (head_x + 1.0) / 2.0
        for n in range(num_clusters):
            pylab.subplot(num_clusters, num_plots_per_cluster + 2, n * (num_plots_per_cluster + 2) + 1)
            pylab.imshow(head_x[n].reshape((image_width, image_height)), interpolation="none")
            pylab.axis("off")

        # plot elements in each cluster
        counts = [0 for i in range(num_clusters)]
        indices = np.arange(len(images_test))
        np.random.shuffle(indices)
        batchsize = 500
        i = 0
        x_batch = np.zeros((batchsize, ndim_x), dtype=np.float32)
        for n in range(len(images_test) // batchsize):
            for b in range(batchsize):
                x_batch[b] = images_test[indices[i]]
                i += 1
            y_batch = model.encode_x_yz(x_batch)[0].data
            labels = np.argmax(y_batch, axis=1)
            for m in range(labels.size):
                cluster = int(labels[m])
                counts[cluster] += 1
                if counts[cluster] <= num_plots_per_cluster:
                    x = (x_batch[m] + 1.0) / 2.0
                    pylab.subplot(num_clusters, num_plots_per_cluster + 2, cluster * (num_plots_per_cluster + 2) + 2 + counts[cluster])
                    pylab.imshow(x.reshape((image_width, image_height)), interpolation="none")
                    pylab.axis("off")

    fig = pylab.gcf()
    fig.set_size_inches(num_plots_per_cluster, num_clusters)
    pylab.savefig("clusters.png")
Example 8: loadFromCheckpoint
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
def loadFromCheckpoint(savedModelDir, newSerialization=False):
    """ Load saved model.

    @param savedModelDir (string)
           Directory where the experiment is to be or was saved
    @returns (nupic.frameworks.opf.model.Model) The loaded model instance.
    """
    if newSerialization:
        return CLAModel.readFromCheckpoint(savedModelDir)
    else:
        return Model.load(savedModelDir)
Example 9: test_load
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import os

def test_load(self):
    filename = 'test3.lang'
    dir_path = os.path.dirname(os.path.realpath(__file__))
    path = os.path.join(dir_path, '..', 'testdata', filename)
    m = Model()
    self.assertEqual(m.load(path), True)
    self.assertEqual(m.forms.forms('parler', 'PrInd'),
                     ['parle', 'parles', 'parle', 'parlons', 'parlez', 'parlent'])
    self.assertEqual(m.forms.init_forms('parle'), ['parler'])
Example 10: predict
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import os
import sys

def predict(files, model_path, output_dir, format):
    # Must specify a supported output format
    if format not in Note.supportedFormats():
        print >>sys.stderr, '\n\tError: Must specify output format'
        print >>sys.stderr, '\tAvailable formats: ', ' | '.join(Note.supportedFormats())
        print >>sys.stderr, ''
        exit(1)

    # Load model
    model = Model.load(model_path)

    # Tell the user if there is nothing to predict
    if not files:
        print >>sys.stderr, "\n\tNote: You did not supply any input files\n"
        exit()

    # For each file, predict concept labels
    n = len(files)
    for i, txt in enumerate(sorted(files)):
        # Read the data into a Note object
        note = Note(format)
        note.read(txt)

        print '-' * 30
        print '\n\t%d of %d' % (i + 1, n)
        print '\t', txt, '\n'

        # Predict concept labels
        labels = model.predict(note)

        # Get predictions in the proper format
        extension = note.getExtension()
        output = note.write(labels)

        # Output the concept predictions
        fname = os.path.splitext(os.path.basename(txt))[0] + '.' + extension
        out_path = os.path.join(output_dir, fname)
        print '\n\nwriting to: ', out_path
        with open(out_path, 'w') as f:
            print >>f, output
        print
Example 11: loadFromCheckpoint
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
def loadFromCheckpoint(savedModelDir, newSerialization=False):
    """ Load saved model.

    :param savedModelDir: (string)
           Directory where the experiment is to be or was saved
    :returns: (:class:`nupic.frameworks.opf.model.Model`) The loaded model
              instance.
    """
    if newSerialization:
        return HTMPredictionModel.readFromCheckpoint(savedModelDir)
    else:
        return Model.load(savedModelDir)
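Examples 5, 8, and 11 are three revisions of the same NuPIC helper: the new-serialization branch moved from CLAModel to its renamed successor HTMPredictionModel, while the legacy branch stayed Model.load. A usage sketch (the checkpoint directory is illustrative and must come from a prior save):

import os

saved_dir = os.path.join('experiments', 'run1', 'savedmodels')  # illustrative
model = loadFromCheckpoint(saved_dir, newSerialization=False)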
Example 12: plot_cluster_head
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import argparse

import numpy as np

def plot_cluster_head():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    model = Model()
    assert model.load(args.model)

    all_y = np.identity(10, dtype=np.float32)
    head = model.cluster_head(all_y).data
    labels = [i for i in range(10)]
    plot.scatter_labeled_z(head, labels, "cluster_head.png")  # plot is a project-level helper
Example 13: __init__
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
def __init__(self, model_file_name, fName=None, gp=None, beam_size=40, test_time=False):
    self.test_time = test_time
    self.features = Features()
    self.beam_size = beam_size
    self.model = Model.load(model_file_name)
    if gp:
        self.perceptron = gp
        return
    elif fName is not None:
        self.perceptron_state = PerceptronSharedState.load(fName, retrainable=True)
    else:
        self.perceptron_state = PerceptronSharedState(5000000)
    self.perceptron = GPerceptron.from_shared_state(self.perceptron_state)
Example 14: plot_mapped_cluster_head
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import argparse

import numpy as np

def plot_mapped_cluster_head():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    model = Model()
    assert model.load(args.model)

    identity = np.identity(model.ndim_y, dtype=np.float32)
    mapped_head = model.linear_transformation(identity)
    labels = [i for i in range(10)]
    plot.scatter_labeled_z(mapped_head.data, labels, "cluster_head.png")  # plot is a project-level helper
Example 15: plot_analogy
# Required import: from model import Model [as alias]
# Or: from model.Model import load [as alias]
import chainer
import numpy as np
import pylab

def plot_analogy():
    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets
    dataset_indices = np.arange(0, len(images_test))
    np.random.shuffle(dataset_indices)

    model = Model()
    assert model.load("model.hdf5")

    # normalize pixels from [0, 1] to [-1, 1]
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    num_analogies = 10
    pylab.gray()

    batch_indices = dataset_indices[:num_analogies]
    x_batch = images_test[batch_indices]
    y_batch = labels_test[batch_indices]
    y_onehot_batch = onehot(y_batch)  # onehot() is a project-level helper

    # A comma (not `and`) is required to enter both context managers;
    # `A() and B()` would enter only one of them.
    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        z_batch = model.encode_x_yz(x_batch)[1].data

        # plot the original images in the left column
        x_batch = (x_batch + 1.0) / 2.0
        for m in range(num_analogies):
            pylab.subplot(num_analogies, 10 + 2, m * 12 + 1)
            pylab.imshow(x_batch[m].reshape((28, 28)), interpolation="none")
            pylab.axis("off")

        all_y = np.identity(10, dtype=np.float32)
        for m in range(num_analogies):
            # repeat z_batch[m] once per class
            fixed_z = np.repeat(z_batch[m].reshape(1, -1), 10, axis=0)
            representation = model.encode_yz_representation(all_y, fixed_z)
            gen_x = model.decode_representation_x(representation).data
            gen_x = (gen_x + 1.0) / 2.0

            # plot the images generated for each label
            for n in range(10):
                pylab.subplot(num_analogies, 10 + 2, m * 12 + 3 + n)
                pylab.imshow(gen_x[n].reshape((28, 28)), interpolation="none")
                pylab.axis("off")

    fig = pylab.gcf()
    fig.set_size_inches(num_analogies, 10)
    pylab.savefig("analogy.png")