

Python Model.save Method Code Examples

This article collects and summarizes typical usage examples of the Python method model.Model.save. If you have been wondering how Model.save works, how to call it, or what real-world uses of it look like, the curated method examples here may help. You can also explore further usage examples of model.Model, the class this method belongs to.


A total of 13 code examples of the Model.save method are shown below, sorted by popularity by default.
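Before the examples, here is a minimal sketch of the usual save-and-reload round trip. It assumes a project-local Model class exposing save(path) and load(path), as the examples below do; the constructor argument and the directory path are placeholders rather than part of any particular project.

from model import Model

model = Model(20)               # construct (and elsewhere train) the model
model.save('./lda_model')       # persist the model state to disk

restored = Model(20)
restored.load('./lda_model')    # read the saved state back into a fresh instance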

Example 1: ModelTest

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]
class ModelTest(unittest.TestCase):

    def setUp(self):
        self.model = Model(20)

        # initialize self.model.global_topic_hist and
        # self.model.word_topic_hist
        for i in xrange(10):
            ordered_sparse_topic_hist = OrderedSparseTopicHistogram(20)
            for j in xrange(10 + i):
                ordered_sparse_topic_hist.increase_topic(j, j + 1)
                self.model.global_topic_hist[j] += j + 1
            self.model.word_topic_hist[i] = ordered_sparse_topic_hist

    def test_save_and_load(self):
        model_dir = '../testdata/lda_model'
        self.model.save(model_dir)
        self.assertTrue(os.path.exists(model_dir))

        new_model = Model(20)
        new_model.load(model_dir)

        self.assertEqual(new_model.num_topics, self.model.num_topics)
        self.assertEqual(len(new_model.word_topic_hist),
                len(self.model.word_topic_hist))

        for word, new_sparse_topic_hist in new_model.word_topic_hist.iteritems():
            self.assertTrue(word in self.model.word_topic_hist)
            sparse_topic_hist = self.model.word_topic_hist[word]
            self.assertEqual(new_sparse_topic_hist.size(),
                    sparse_topic_hist.size())

            for j in xrange(new_sparse_topic_hist.size()):
                self.assertEqual(new_sparse_topic_hist.non_zeros[j].topic,
                        sparse_topic_hist.non_zeros[j].topic)
                self.assertEqual(new_sparse_topic_hist.non_zeros[j].count,
                        sparse_topic_hist.non_zeros[j].count)

        self.assertEqual(new_model.hyper_params.topic_prior,
                self.model.hyper_params.topic_prior)
        self.assertEqual(new_model.hyper_params.word_prior,
                self.model.hyper_params.word_prior)

        # print self.model

    def test_has_word(self):
        self.assertTrue(self.model.has_word(0))
        self.assertTrue(self.model.has_word(2))
        self.assertTrue(self.model.has_word(4))
        self.assertTrue(self.model.has_word(6))
        self.assertTrue(self.model.has_word(8))
        self.assertFalse(self.model.has_word(10))
        self.assertFalse(self.model.has_word(12))
        self.assertFalse(self.model.has_word(14))
        self.assertFalse(self.model.has_word(16))
        self.assertFalse(self.model.has_word(18))

    def test_get_word_topic_dist(self):
        word_topic_dist = self.model.get_word_topic_dist(10)
        self.assertTrue(len(word_topic_dist))
Developer: Ambier, Project: python-sparselda, Lines: 62, Source: model_test.py

Example 2: making_models

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]
def making_models(list_methods = ['item_similarity_recommender', 'factorization_recommender', 
                    'ranking_factorization_recommender']):
    '''
    INPUT: list_methods - list of recommender method names to build and save
    DESCRIPTION: Makes and saves the model (deprecated)
    OUTPUT: None
    '''
    model = Model()
    model.save('item_similarity_recommender')
    for method in list_methods:
        model.model = model.build(method)
        model.save(method)
Developer: blueconcept, Project: food_crawler, Lines: 14, Source: modelsaver.py

Example 3: sort_all

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]
def sort_all(lang):
    model_dir = os.path.join('progress', lang)
    model = Model()

    ff = model.all_files( model_dir )

    fname = ff[2]
    p = fname.split('.')
    fname1 = p[0] + '_s' + '.' + p[1]
    print fname
    print fname1
    model.load(fname)
    model.short_ignore = ['une', 'un']
    model.sort()
    model.save(fname1)
Developer: alexandre-solovyov, Project: lang, Lines: 17, Source: sort.py

Example 4: xrange

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]
freq_eval = 1000  # evaluate on dev every freq_eval steps
best_dev = -np.inf
best_test = -np.inf
count = 0
for epoch in xrange(n_epochs):
    epoch_costs = []
    print "Starting epoch %i..." % epoch
    for i, index in enumerate(np.random.permutation(len(train_data))):
        count += 1
        input = create_input(train_data[index], parameters, True, singletons)
        new_cost = f_train(*input)
        epoch_costs.append(new_cost)
        if i % 50 == 0 and i > 0:
            print "%i, cost average: %f" % (i, np.mean(epoch_costs[-50:]))
        if count % freq_eval == 0:
            dev_score = evaluate(parameters, f_eval, dev_sentences,
                                 dev_data, id_to_tag, dico_tags)
            test_score = evaluate(parameters, f_eval, test_sentences,
                                  test_data, id_to_tag, dico_tags)
            print "Score on dev: %.5f" % dev_score
            print "Score on test: %.5f" % test_score
            if dev_score > best_dev:
                best_dev = dev_score
                print "New best score on dev."
                print "Saving model to disk..."
                model.save()
            if test_score > best_test:
                best_test = test_score
                print "New best score on test."
    print "Epoch %i done. Average cost: %f" % (epoch, np.mean(epoch_costs))
Developer: metpallyv, Project: tagger, Lines: 32, Source: train.py

Example 5: __init__

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]
class Analytics:

	def __init__(self, max_threads=4):
		self.data = Model()
		#self.max_threads = threading.Semaphore(app.config['THREADS'])
		self.active = False
		self.websocket = None
		self.thread = None
		self.websocket_lock = threading.Lock()
		self.stack_lock = threading.Lock()
		self.progress = 0
		self.total = 0

		self.max_threads = threading.Semaphore(4)

	def add_text(self, text, context=[]):
		added = []
		for t in text:
			elt = None
			if t.strip() != "":
				if is_ip(t):
					elt = Ip(is_ip(t), [])
				elif is_url(t):
					elt = Url(is_url(t), [])			
				elif is_hostname(t):
					elt = Hostname(is_hostname(t), [])
				if elt:
					added.append(self.save_element(elt, context))
					
		if len(added) == 1:
			return added[0]
		else:
			return added
		

	def save_element(self, element, context=[], with_status=False):

		element.upgrade_context(context)
		return self.data.save(element, with_status=with_status)
		


	# graph function
	def add_artifacts(self, data, context=[]):
		artifacts = find_artifacts(data)
		
		added = []
		for url in artifacts['urls']:
			added.append(self.save_element(url, context))

		for hostname in artifacts['hostnames']:
			added.append(self.save_element(hostname, context))

		for ip in artifacts['ips']:
			added.append(self.save_element(ip, context))

		return added        


	# elements analytics

	def bulk_asn(self):
		results = self.data.elements.find({'type': 'ip', 'bgp': None})

		ips = []
		debug_output("(getting ASNs for %s IPs)" % results.count(), type='analytics')
		
		for r in results:
			ips.append(r)

		ips_chunks = [ips[x:x+100] for x in xrange(0, len(ips), 100)]

		as_info = {}
		for ips in ips_chunks:
			try:
				as_info = dict(as_info.items() + get_net_info_shadowserver(ips).items())

			except Exception, e:
				pass
		
		if as_info == {}:
			return

		for ip in as_info:
			
			_as = as_info[ip]
			_ip = self.data.find_one({'value': ip})

			if not _ip:
				return

			del _as['ip']
			for key in _as:
				if key not in ['type', 'value', 'context']:
					_ip[key] = _as[key]
			del _as['bgp']

			_as = As.from_dict(_as)

			# commit any changes to DB
#.........part of the code omitted here.........
Developer: Tania188, Project: malcom, Lines: 103, Source: analytics.py

Example 6: main

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]

#.........part of the code omitted here.........
					sum_loss_discriminator += float(loss_discriminator.data)
					sum_discriminator_z_confidence_true += discriminator_z_confidence_true
					sum_discriminator_z_confidence_fake += discriminator_z_confidence_fake
					sum_discriminator_y_confidence_true += discriminator_y_confidence_true
					sum_discriminator_y_confidence_fake += discriminator_y_confidence_fake

				### generator phase ###
				if True:
					y_onehot_fake_u, z_fake_u = model.encode_x_yz(x_u, apply_softmax_y=True)

					dz_fake = model.discriminate_z(z_fake_u, apply_softmax=False)
					dy_fake = model.discriminate_y(y_onehot_fake_u, apply_softmax=False)

					loss_generator = F.softmax_cross_entropy(dz_fake, class_true) + F.softmax_cross_entropy(dy_fake, class_true)

					model.cleargrads()
					loss_generator.backward()
					optimizer_generator.update()

					sum_loss_generator += float(loss_generator.data)

				### supervised phase ###
				if True:
					logit_l, _ = model.encode_x_yz(x_l, apply_softmax_y=False)
					loss_supervised = F.softmax_cross_entropy(logit_l, y_l)

					model.cleargrads()
					loss_supervised.backward()
					optimizer_semi_supervised.update()

					sum_loss_supervised += float(loss_supervised.data)

				### additional cost ###
				if True:
					identity = np.identity(model.ndim_y, dtype=np.float32)
					if using_gpu:
						identity = cuda.to_gpu(identity)
					mapped_head = model.linear_transformation(identity)
					loss_linear_transformation = F.mean_squared_error(mapped_cluster_head_2d_target, mapped_head)

					model.cleargrads()
					loss_linear_transformation.backward()
					optimizer_linear_transformation.update()

					sum_loss_linear_transformation	+= float(loss_linear_transformation.data)

			printr("Training ... {:3.0f}% ({}/{})".format((itr + 1) / total_iterations_train * 100, itr + 1, total_iterations_train))

		model.save(args.model)

		labeled_iter_train = dataset.get_iterator(args.batchsize * 20, train=True, labeled=True, gpu=using_gpu)
		unlabeled_iter_train = dataset.get_iterator(args.batchsize * 20, train=True, unlabeled=True, gpu=using_gpu)
		average_accuracy_l = 0
		average_accuracy_u = 0
		for x_l, true_label in labeled_iter_train:
			with chainer.no_backprop_mode(), chainer.using_config("train", False):
				y_onehot_l, _ = model.encode_x_yz(x_l, apply_softmax_y=True)
				accuracy = F.accuracy(y_onehot_l, true_label)
				average_accuracy_l += float(accuracy.data)

		for x_u, true_label in unlabeled_iter_train:
			with chainer.no_backprop_mode(), chainer.using_config("train", False):
				y_onehot_u, _ = model.encode_x_yz(x_u, apply_softmax_y=True)
				accuracy = F.accuracy(y_onehot_u, true_label)
				average_accuracy_u += float(accuracy.data)

		average_accuracy_l /= labeled_iter_train.get_total_iterations()
		average_accuracy_u /= unlabeled_iter_train.get_total_iterations()
			
		clear_console()
		print("Epoch {} done in {} sec - loss: g={:.5g}, d={:.5g}, a={:.5g}, s={:.5g}, l={:.5g} - disc_z: true={:.1f}%, fake={:.1f}% - disc_y: true={:.1f}%, fake={:.1f}% - acc: l={:.2f}%, u={:.2f}% - total {} min".format(
			epoch + 1, int(time.time() - epoch_start_time), 
			sum_loss_generator / total_iterations_train, 
			sum_loss_discriminator / total_iterations_train, 
			sum_loss_autoencoder / total_iterations_train, 
			sum_loss_supervised / total_iterations_train, 
			sum_loss_linear_transformation / total_iterations_train, 
			sum_discriminator_z_confidence_true / total_iterations_train * 100, 
			sum_discriminator_z_confidence_fake / total_iterations_train * 100, 
			sum_discriminator_y_confidence_true / total_iterations_train * 100, 
			sum_discriminator_y_confidence_fake / total_iterations_train * 100, 
			average_accuracy_l * 100,
			average_accuracy_u * 100,
			int((time.time() - training_start_time) // 60)))

	if epoch == 50:
		optimizer_encoder.set_learning_rate(0.001)
		optimizer_decoder.set_learning_rate(0.001)
		optimizer_semi_supervised.set_learning_rate(0.01)
		optimizer_generator.set_learning_rate(0.01)
		optimizer_discriminator_y.set_learning_rate(0.01)
		optimizer_discriminator_z.set_learning_rate(0.01)

	if epoch == 1000:
		optimizer_encoder.set_learning_rate(0.0001)
		optimizer_decoder.set_learning_rate(0.0001)
		optimizer_semi_supervised.set_learning_rate(0.001)
		optimizer_generator.set_learning_rate(0.001)
		optimizer_discriminator_y.set_learning_rate(0.001)
		optimizer_discriminator_z.set_learning_rate(0.001)
Developer: musyoku, Project: adversarial-autoencoder, Lines: 104, Source: train.py

Example 7: __init__

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]
class Analytics:

	def __init__(self, max_threads=4):
		self.data = Model()
		#self.max_threads = threading.Semaphore(app.config['THREADS'])
		self.active = False
		self.status = "Inactive"
		self.websocket = None
		self.thread = None
		self.websocket_lock = threading.Lock()
		self.stack_lock = threading.Lock()
		self.progress = 0
		self.total = 0

		self.max_threads = threading.Semaphore(4)

	def add_text(self, text, tags=[]):
		added = []
		for t in text:
			elt = None
			if t.strip() != "":
				if is_url(t):
					elt = Url(is_url(t), [])
				elif is_hostname(t):
					elt = Hostname(is_hostname(t), [])
				elif is_ip(t):
					elt = Ip(is_ip(t), [])
				if elt:
					added.append(self.save_element(elt, tags))
					
		if len(added) == 1:
			return added[0]
		else:
			return added
		

	def save_element(self, element, tags=[], with_status=False):

		element.upgrade_tags(tags)
		return self.data.save(element, with_status=with_status)
		


	# graph function
	def add_artifacts(self, data, tags=[]):
		artifacts = find_artifacts(data)
		
		added = []
		for url in artifacts['urls']:
			added.append(self.save_element(url, tags))

		for hostname in artifacts['hostnames']:
			added.append(self.save_element(hostname, tags))

		for ip in artifacts['ips']:
			added.append(self.save_element(ip, tags))

		return added        


	# elements analytics

	def bulk_asn(self, items=1000):

		last_analysis = {'$or': [
									{ 'last_analysis': {"$lt": datetime.datetime.utcnow() - datetime.timedelta(days=7)} },
									{ 'last_analysis': None },
								]
						}

		nobgp = {"$or": [{'bgp': None}, last_analysis ]}

		total = self.data.elements.find({ "$and": [{'type': 'ip'}, nobgp]}).count()
		done = 0
		results = [r for r in self.data.elements.find({ "$and": [{'type': 'ip'}, nobgp]})[:items]]

		while len(results) > 0:
		
			ips = []
			debug_output("(getting ASNs for %s IPs - %s/%s done)" % (len(results), done, total), type='analytics')
			
			for r in results:
				ips.append(r)

			as_info = {}
			
			try:
				as_info = get_net_info_shadowserver(ips)
			except Exception, e:
				debug_output("Could not get AS for IPs: %s" % e)
			
			if as_info == {}:
				debug_output("as_info empty", 'error')
				return

			for ip in as_info:
				
				_as = as_info[ip]
				_ip = self.data.find_one({'value': ip})

#.........part of the code omitted here.........
Developer: carriercomm, Project: malcom, Lines: 103, Source: analytics.py

Example 8:

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]
episode = 0

scores = []
losses = []
qs = []
eps = []
while True:
	episode += 1
	print "Episode #%d"%(episode)
	warmup = 10 if episode == 1 else 0
	score, loss, mean_q, epsilon, predictions = agent.learn(overfit=False, games=1, warmup=warmup)

	print "Loss %f, mean q %f"%(loss, mean_q)
	print "Predictions 0: %d%%, 1: %d%%"%(predictions[0]/sum(predictions)*100, predictions[1]/sum(predictions)*100)

	model.save('model.h5')

	plt.close()

	scores.append(score)
	losses.append(loss)
	qs.append(mean_q)
	eps.append(epsilon)

	s_score = plt.subplot(411)
	s_loss = plt.subplot(412)
	s_q = plt.subplot(413)
	s_eps = plt.subplot(414)

	s_score.set_title('score')
	s_loss.set_title('loss')
Developer: blazer82, Project: ai, Lines: 33, Source: learn.py

Example 9: main

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]

#.........part of the code omitted here.........
		optimizer_encoder.add_hook(GradientClipping(args.grad_clip))

	optimizer_decoder = Optimizer(args.optimizer, args.learning_rate, args.momentum)
	optimizer_decoder.setup(model.decoder)
	if args.grad_clip > 0:
		optimizer_decoder.add_hook(GradientClipping(args.grad_clip))

	optimizer_discriminator = Optimizer(args.optimizer, args.learning_rate, args.momentum)
	optimizer_discriminator.setup(model.discriminator)
	if args.grad_clip > 0:
		optimizer_discriminator.add_hook(GradientClipping(args.grad_clip))

	using_gpu = False
	if args.gpu_device >= 0:
		cuda.get_device(args.gpu_device).use()
		model.to_gpu()
		using_gpu = True
	xp = model.xp

	# 0 -> true sample
	# 1 -> generated sample
	class_true = np.zeros(args.batchsize, dtype=np.int32)
	class_fake = np.ones(args.batchsize, dtype=np.int32)
	if using_gpu:
		class_true = cuda.to_gpu(class_true)
		class_fake = cuda.to_gpu(class_fake)

	training_start_time = time.time()
	for epoch in range(args.total_epochs):

		sum_loss_generator = 0
		sum_loss_discriminator = 0
		sum_loss_autoencoder = 0
		sum_discriminator_confidence_true = 0
		sum_discriminator_confidence_fake = 0
		epoch_start_time = time.time()
		dataset.shuffle()

		# training
		for itr in range(total_iterations_train):
			# update model parameters
			with chainer.using_config("train", True):
				x_l, y_l, y_onehot_l = dataset.sample_minibatch(args.batchsize, gpu=using_gpu)

				### reconstruction phase ###
				if True:
					z_fake_l = model.encode_x_z(x_l)
					x_reconstruction_l = model.decode_yz_x(y_onehot_l, z_fake_l)
					loss_reconstruction = F.mean_squared_error(x_l, x_reconstruction_l)

					model.cleargrads()
					loss_reconstruction.backward()
					optimizer_encoder.update()
					optimizer_decoder.update()

				### adversarial phase ###
				if True:
					z_fake_l = model.encode_x_z(x_l)
					z_true_batch = sampler.gaussian(args.batchsize, model.ndim_z, mean=0, var=1)
					if using_gpu:
						z_true_batch = cuda.to_gpu(z_true_batch)
					dz_true = model.discriminate_z(z_true_batch, apply_softmax=False)
					dz_fake = model.discriminate_z(z_fake_l, apply_softmax=False)
					discriminator_confidence_true = float(xp.mean(F.softmax(dz_true).data[:, 0]))
					discriminator_confidence_fake = float(xp.mean(F.softmax(dz_fake).data[:, 1]))
					loss_discriminator = F.softmax_cross_entropy(dz_true, class_true) + F.softmax_cross_entropy(dz_fake, class_fake)

					model.cleargrads()
					loss_discriminator.backward()
					optimizer_discriminator.update()

				### generator phase ###
				if True:
					z_fake_l = model.encode_x_z(x_l)
					dz_fake = model.discriminate_z(z_fake_l, apply_softmax=False)
					loss_generator = F.softmax_cross_entropy(dz_fake, class_true)

					model.cleargrads()
					loss_generator.backward()
					optimizer_encoder.update()

				sum_loss_discriminator += float(loss_discriminator.data)
				sum_loss_generator += float(loss_generator.data)
				sum_loss_autoencoder += float(loss_reconstruction.data)
				sum_discriminator_confidence_true += discriminator_confidence_true
				sum_discriminator_confidence_fake += discriminator_confidence_fake

			printr("Training ... {:3.0f}% ({}/{})".format((itr + 1) / total_iterations_train * 100, itr + 1, total_iterations_train))

		model.save(args.model)

		clear_console()
		print("Epoch {} done in {} sec - loss: g={:.5g}, d={:.5g}, a={:.5g} - discriminator: true={:.1f}%, fake={:.1f}% - total {} min".format(
			epoch + 1, int(time.time() - epoch_start_time), 
			sum_loss_generator / total_iterations_train, 
			sum_loss_discriminator / total_iterations_train, 
			sum_loss_autoencoder / total_iterations_train, 
			sum_discriminator_confidence_true / total_iterations_train * 100, 
			sum_discriminator_confidence_fake / total_iterations_train * 100, 
			int((time.time() - training_start_time) // 60)))
Developer: musyoku, Project: adversarial-autoencoder, Lines: 104, Source: train.py

Example 10: Segmentor

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]

#.........part of the code omitted here.........
        instance_num = len(self.training_unigrams_data)
        processing_print_interval = instance_num / 10 
        if processing_print_interval == 0 :
            processing_print_interval = 1 
        best_f = NEG_INF
        best_ite = -1
        best_model_data = None
        for ite in range(self.max_iter) :
            logging.info("training iteration %d ." %(ite + 1))
            for instance_id in range(instance_num) :
                instance = self.training_unigrams_data[instance_id]
                tags = self.training_tags_data[instance_id]
                predicted_tags = self.decoder.decode(self.extractor , self.model , self.constrain , instance )
                assert(len(tags) == len(predicted_tags))
                gold_features = self.decoder.calculate_label_sequence_feature(tags , instance , self.extractor , self.model)
                predicted_features = self.decoder.get_current_predict_label_sequence_feature()
                self.model.update_model(gold_features , predicted_features)
                #logging
                if ( instance_id + 1 ) % processing_print_interval == 0 :
                    current_ite_percent = ( instance_id + 1 ) / processing_print_interval * 10 
                    logging.info("Ite %d : %d instance processed. (%d%% / %d%%)" %( ite + 1 , instance_id + 1 ,
                                  current_ite_percent , current_ite_percent / self.max_iter +  float(ite) / self.max_iter * 100  ))
            logging.info("Ite %d done . %d instance processed. (%d%%)" %( ite + 1 , instance_num ,
                         float(ite+1) / self.max_iter * 100 ))
            f = self._4training_evaluate_processing(dev_path)
            #! save temporary model if best
            if f > best_f :
                best_f = f 
                best_ite = ite
                best_model_data = self.model.get_current_saving_data()
                logging.info("currently iteration %d get the best f1-score" %(ite + 1))
        logging.info("Training done.")
        logging.info("saving model at iteration %d with has best f1-score %.2f%%" %( best_ite + 1 , best_f * 100))
        self._save(model_saving_path , best_model_data )

    def _4training_evaluate_processing(self , dev_path) :
        nr_processing_right = 0
        nr_gold = 0
        nr_processing = 0
        for instance in DatasetHandler.read_dev_data(dev_path) :
            unigrams , gold_tags = Segmentor._processing_one_segmented_WSAtom_instance2unigrams_and_tags(instance)
            predict_tags = Decoder.decode_for_predict(self.extractor , self.model , self.constrain , unigrams)
            gold_coor_seq = self.__innerfunc_4evaluate_generate_word_coordinate_sequence_from_tags(gold_tags)
            predict_coor_seq = self.__innerfunc_4evaluate_generate_word_coordinate_sequence_from_tags(predict_tags)
            cur_nr_gold , cur_nr_processing , cur_nr_processing_right = (
                            self.__innerfunc_4evaluate_get_nr_gold_and_processing_and_processing_right(gold_coor_seq , predict_coor_seq)
                    )
            nr_gold += cur_nr_gold
            nr_processing += cur_nr_processing
            nr_processing_right += cur_nr_processing_right
        p , r , f = self.__innerfunc_4evaluate_calculate_prf(nr_gold , nr_processing , nr_processing_right)
        print >>sys.stderr , ("Eval result :\np : %.2f%% r : %.2f%% f : %.2f%%\n"
               "total word num : %d total predict word num : %d predict right num : %d ")%(
                p * 100 , r * 100, f * 100 , nr_gold , nr_processing , nr_processing_right
                )
        return f
    
    def __innerfunc_4evaluate_generate_word_coordinate_sequence_from_tags(self , tags) :
        '''
        generate coordinate sequence from tags 
        => B M E S S              (tags)
        => (0,2) , (3,3) , (4,4)  (generate coordinate sequence)
        => (中国人)(棒)(棒)  (generate word sequence directly)
        that means , every coordinate stands for a word in the original word sequence .
        doing it this way [may be more convenient than generating words directly] from tags
        '''
Developer: memeda, Project: cws, Lines: 70, Source: segmentor.py
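The body of __innerfunc_4evaluate_generate_word_coordinate_sequence_from_tags is omitted above; as a rough illustration of the BMES-tag-to-coordinate conversion the docstring describes, a hypothetical stand-alone re-implementation (not the project's actual code) could look like this:

def tags_to_coordinates(tags):
    '''Convert a BMES tag sequence into (start, end) word coordinates,
    e.g. ['B', 'M', 'E', 'S', 'S'] -> [(0, 2), (3, 3), (4, 4)].'''
    coords = []
    start = 0
    for i, tag in enumerate(tags):
        if tag in ('B', 'S'):
            start = i                  # a new word starts at this position
        if tag in ('E', 'S'):
            coords.append((start, i))  # the current word ends at this position
    return coords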

Example 11: __init__

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]
class Analytics:

	def __init__(self, max_threads=4):
		self.data = Model()
		#self.max_threads = threading.Semaphore(app.config['THREADS'])
		self.active = False
		self.websocket = None
		self.thread = None
		self.websocket_lock = threading.Lock()
		self.progress = 0
		self.total = 0

		self.max_threads = threading.Semaphore(4)

	def add_text(self, text, context=[]):
		added = []
		for t in text:
			elt = None
			if t.strip() != "":
				if is_ip(t):
					elt = Ip(is_ip(t), [])
				elif is_url(t):
					elt = Url(is_url(t), [])			
				elif is_hostname(t):
					elt = Hostname(is_hostname(t), [])
				if elt:
					added.append(self.save_element(elt, context))
					
		if len(added) == 1:
			return added[0]
		else:
			return added
		

	def save_element(self, element, context=[], with_status=False):

		element.upgrade_context(context)
		return self.data.save(element, with_status=with_status)
		


	# graph function
	def add_artifacts(self, data, context=[]):
		artifacts = find_artifacts(data)
		
		added = []
		for url in artifacts['urls']:
			added.append(self.data.save(url, context))

		for hostname in artifacts['hostnames']:
			added.append(self.data.hostname_add(hostname, context))

		for ip in artifacts['ips']:
			added.append(self.data.ip_add(ip, context))

		return added        


	# elements analytics

	def bulk_asn(self):
		results = self.data.elements.find({ 'type': 'ip' })
		
		#elts = []
		ips = []
		debug_output("(getting ASNs for %s IPs)" % results.count(), type='analytics')
		
		for r in results:
			ips.append(r)

		as_info = get_net_info_shadowserver(ips)
		
		if not as_info:
			return

		for ip in as_info:
			
			_as = as_info[ip]
			_ip = self.data.find_one({'value': ip})
			
			del _as['ip']
			for key in _as:
				if key not in ['type', 'value', 'context']:
					_ip[key] = _as[key]
			del _as['bgp']

			_as = As.from_dict(_as)

			# commit any changes to DB
			_as = self.save_element(_as)
			_ip = self.save_element(_ip)

			if _as and _ip:
				self.data.connect(_ip, _as, 'net_info')



	def find_evil(self, elt, depth=2, node_links=([],[])):
		evil_nodes = []
		evil_links = []
#.........part of the code omitted here.........
Developer: ant4g0nist, Project: malcom, Lines: 103, Source: analytics.py

Example 12: main

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]

#.........part of the code omitted here.........

		epoch_start_time = time.time()
		dataset.shuffle()

		# training
		for itr in range(total_iterations_train):
			# update model parameters
			with chainer.using_config("train", True):
				# sample minibatch
				x_u, _, _ = dataset.sample_minibatch(args.batchsize, gpu=using_gpu)
				
				### reconstruction phase ###
				if True:
					y_onehot_u, z_u = model.encode_x_yz(x_u, apply_softmax_y=True)
					repr_u = model.encode_yz_representation(y_onehot_u, z_u)
					x_reconstruction_u = model.decode_representation_x(repr_u)
					loss_reconstruction = F.mean_squared_error(x_u, x_reconstruction_u)

					model.cleargrads()
					loss_reconstruction.backward()
					optimizer_encoder.update()
					optimizer_cluster_head.update()
					optimizer_decoder.update()

				### adversarial phase ###
				if True:
					y_onehot_fake_u, z_fake_u = model.encode_x_yz(x_u, apply_softmax_y=True)

					z_true = sampler.gaussian(args.batchsize, model.ndim_z, mean=0, var=1)
					y_onehot_true = sampler.onehot_categorical(args.batchsize, model.ndim_y)
					if using_gpu:
						z_true = cuda.to_gpu(z_true)
						y_onehot_true = cuda.to_gpu(y_onehot_true)

					dz_true = model.discriminate_z(z_true, apply_softmax=False)
					dz_fake = model.discriminate_z(z_fake_u, apply_softmax=False)
					dy_true = model.discriminate_y(y_onehot_true, apply_softmax=False)
					dy_fake = model.discriminate_y(y_onehot_fake_u, apply_softmax=False)

					discriminator_z_confidence_true = float(xp.mean(F.softmax(dz_true).data[:, 0]))
					discriminator_z_confidence_fake = float(xp.mean(F.softmax(dz_fake).data[:, 1]))
					discriminator_y_confidence_true = float(xp.mean(F.softmax(dy_true).data[:, 0]))
					discriminator_y_confidence_fake = float(xp.mean(F.softmax(dy_fake).data[:, 1]))

					loss_discriminator_z = F.softmax_cross_entropy(dz_true, class_true) + F.softmax_cross_entropy(dz_fake, class_fake)
					loss_discriminator_y = F.softmax_cross_entropy(dy_true, class_true) + F.softmax_cross_entropy(dy_fake, class_fake)
					loss_discriminator = loss_discriminator_z + loss_discriminator_y

					model.cleargrads()
					loss_discriminator.backward()
					optimizer_discriminator_z.update()
					optimizer_discriminator_y.update()

				### generator phase ###
				if True:
					y_onehot_fake_u, z_fake_u = model.encode_x_yz(x_u, apply_softmax_y=True)

					dz_fake = model.discriminate_z(z_fake_u, apply_softmax=False)
					dy_fake = model.discriminate_y(y_onehot_fake_u, apply_softmax=False)

					loss_generator = F.softmax_cross_entropy(dz_fake, class_true) + F.softmax_cross_entropy(dy_fake, class_true)

					model.cleargrads()
					loss_generator.backward()
					optimizer_encoder.update()

				### additional cost ###
				if True:
					distance = model.compute_distance_of_cluster_heads()
					loss_cluster_head = -F.sum(distance)

					model.cleargrads()
					loss_cluster_head.backward()
					optimizer_cluster_head.update()

				sum_loss_discriminator 	+= float(loss_discriminator.data)
				sum_loss_generator 		+= float(loss_generator.data)
				sum_loss_autoencoder 	+= float(loss_reconstruction.data)
				sum_loss_cluster_head	+= float(model.nCr(model.ndim_y, 2) * model.cluster_head_distance_threshold + loss_cluster_head.data)
				sum_discriminator_z_confidence_true += discriminator_z_confidence_true
				sum_discriminator_z_confidence_fake += discriminator_z_confidence_fake
				sum_discriminator_y_confidence_true += discriminator_y_confidence_true
				sum_discriminator_y_confidence_fake += discriminator_y_confidence_fake

			printr("Training ... {:3.0f}% ({}/{})".format((itr + 1) / total_iterations_train * 100, itr + 1, total_iterations_train))

		model.save(args.model)
			
		clear_console()
		print("Epoch {} done in {} sec - loss: g={:.5g}, d={:.5g}, a={:.5g}, c={:.5g} - disc_z: true={:.1f}%, fake={:.1f}% - disc_y: true={:.1f}%, fake={:.1f}% - total {} min".format(
			epoch + 1, int(time.time() - epoch_start_time), 
			sum_loss_generator / total_iterations_train, 
			sum_loss_discriminator / total_iterations_train, 
			sum_loss_autoencoder / total_iterations_train, 
			sum_loss_cluster_head / total_iterations_train, 
			sum_discriminator_z_confidence_true / total_iterations_train * 100, 
			sum_discriminator_z_confidence_fake / total_iterations_train * 100, 
			sum_discriminator_y_confidence_true / total_iterations_train * 100, 
			sum_discriminator_y_confidence_fake / total_iterations_train * 100, 
			int((time.time() - training_start_time) // 60)))
Developer: musyoku, Project: adversarial-autoencoder, Lines: 104, Source: train.py

Example 13: Model

# Required import: from model import Model [as alias]
# Or: from model.Model import save [as alias]
  frames = 7
  width, height = 1000, 1000

  # create and train a model if the file isn't there
  if not os.path.exists(file):
    m = Model( bbox, width, height, None, frames )
    gps = gzip.open('all.tsv.gz', 'r').readlines()
  
    # loop over the points 
    for i in xrange(1, len(gps)):
      p2 = gps[i].strip().split('\t')
      p1 = gps[i-1].strip().split('\t')
      print p1
  
      # only train the model with points on the same track (ID)
      if p1[0] == p2[0]:
        m.train( p1, p2, int( p1[1][8:10] )-1 )
  
  
    print 'Finished training...now save'
    #m.visualize()
    m.save(file)
    predict(m)
  
  else:
    m = Model( bbox, width, height, file )
    predict(m)
    #for i in range(frames):
    #  print i
    #  m.visualize(i)
Developer: NKSG, Project: uber-data, Lines: 32, Source: task1_2.py


Note: The model.Model.save method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's License; do not reproduce without permission.