本文整理汇总了Python中reader.Reader.get_data方法的典型用法代码示例。如果您正苦于以下问题:Python Reader.get_data方法的具体用法?Python Reader.get_data怎么用?Python Reader.get_data使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类reader.Reader的用法示例。
在下文中一共展示了Reader.get_data方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from reader import Reader [as 别名]
# 或者: from reader.Reader import get_data [as 别名]
def main(unused_args):
    """Train an RNN POS tagger on WSJ sections 2-12, checkpointing the best model.

    Loads train/test splits via Reader, builds a training and a weight-sharing
    evaluation model, decays the learning rate per epoch, and saves a checkpoint
    whenever the test misclassification rate improves.
    """
    corpus = Reader(split=0.9)
    x_train, y_train, x_test, y_test = corpus.get_data(glob('../../WSJ-2-12/*/*.POS'))
    print('len(reader.word_to_id)', len(corpus.word_to_id),
          'len(reader.tag_to_id)', len(corpus.tag_to_id))
    print('len(x_train)', len(x_train),
          'len(x_test)', len(x_test))
    best_misclass = 1.0
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-FLAGS.init_scale, FLAGS.init_scale)
        # Two models sharing one variable scope: the second reuses the
        # first's weights but is built in inference (no-dropout) mode.
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            train_model = RNNTagger(True, len(corpus.word_to_id), len(corpus.tag_to_id))
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            eval_model = RNNTagger(False, len(corpus.word_to_id), len(corpus.tag_to_id))
        tf.initialize_all_variables().run()
        saver = tf.train.Saver()
        for epoch in range(FLAGS.max_max_epoch):
            # Exponential decay kicks in only after FLAGS.max_epoch epochs.
            lr_decay = FLAGS.lr_decay ** max(epoch - FLAGS.max_epoch, 0.0)
            train_model.assign_lr(session, FLAGS.learning_rate * lr_decay)
            print("Epoch: %d Learning rate: %.3f" % (epoch + 1, session.run(train_model.lr)))
            train_perplexity, _ = run_epoch(session, train_model, x_train, y_train,
                                            train_model.train_op, verbose=True)
            _, misclass = run_epoch(session, eval_model, x_test, y_test,
                                    tf.no_op(), verbose=True)
            if misclass < best_misclass:
                best_misclass = misclass
                fname = 'dropout_double_rnn_tagger_' + str(best_misclass)
                saver.save(session, fname, global_step=epoch)
                print('saving', fname)
示例2: main
# 需要导入模块: from reader import Reader [as 别名]
# 或者: from reader.Reader import get_data [as 别名]
def main(unused_args):
    """Train a bidirectional RNN POS tagger on WSJ sections 2-12.

    Prints the flag configuration, trains with per-epoch learning-rate decay,
    evaluates after every epoch, checkpoints whenever the test
    misclassification rate improves, and optionally renders a t-SNE plot of
    the learned word embeddings.
    """
    print("\nParameters:")
    # FIX: dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3 (which the rest of this file targets); items() works on both.
    for attr, value in sorted(FLAGS.__flags.items()):
        print("{}={}".format(attr, value))
    print("")
    reader = Reader(split=0.9)
    (x_train, y_train, mask_train,
     x_test, y_test, mask_test) = reader.get_data(glob('../../WSJ-2-12/*/*.POS'))
    print('len(x_train)', len(x_train), 'len(x_test)', len(x_test))
    print('reader.ignore_ids', reader.ignore_ids)
    print('len(reader.word_to_id)', len(reader.word_to_id),
          'len(reader.tag_to_id)', len(reader.tag_to_id))
    best_misclass = 1.0
    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-FLAGS.init_scale, FLAGS.init_scale)
        # Training and evaluation models share weights via variable-scope
        # reuse; the eval model is built without dropout.
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            m = BiRNNTagger(True, len(reader.word_to_id), len(reader.tag_to_id),
                            reader.maxlen)
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mtest = BiRNNTagger(False, len(reader.word_to_id), len(reader.tag_to_id),
                                reader.maxlen)
        tf.initialize_all_variables().run()
        saver = tf.train.Saver()
        for i in range(FLAGS.max_max_epoch):
            # Exponential decay starts only after FLAGS.max_epoch epochs.
            lr_decay = FLAGS.lr_decay ** max(i - FLAGS.max_epoch, 0.0)
            m.assign_lr(session, FLAGS.learning_rate * lr_decay)
            print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
            train_perplexity, _ = run_epoch(session, m, x_train, y_train, mask_train,
                                            m.train_op, verbose=True)
            _, misclass = run_epoch(session, mtest, x_test, y_test, mask_test,
                                    tf.no_op(), verbose=True)
            if misclass < best_misclass:
                best_misclass = misclass
                fname = 'models/dropout_bid3rnn_tagger_' + str(best_misclass)
                saver.save(session, fname, global_step=i)
                print('saving', fname)
        if FLAGS.plot_tsne:
            tsne(session.run(m.embedding), reader.word_to_id, FLAGS.tsne_size)
示例3: __init__
# 需要导入模块: from reader import Reader [as 别名]
# 或者: from reader.Reader import get_data [as 别名]
def __init__(self, filename, n_clusters, epsilon=0.05, max_iter=-1):
    """Initialize a fuzzy c-means-style clusterer from a data file.

    Args:
        filename: path handed to Reader, which supplies the data matrix
            X and labels y via get_data().
        n_clusters: number of clusters.
        epsilon: convergence threshold on membership change.
        max_iter: iteration cap; -1 means no explicit cap.
    """
    self.m = 2  # fuzziness exponent
    self.n_clusters = n_clusters
    self.max_iter = max_iter
    self.epsilon = epsilon
    reader = Reader(filename)
    self.X, self.y = reader.get_data()
    self.data_shape = self.X.shape
    rows, cols = self.data_shape
    # Membership matrix U: one-hot rows that cycle samples through the
    # clusters (sample i starts fully assigned to cluster i % n_clusters).
    # FIX: the np.float alias was deprecated in NumPy 1.20 and removed in
    # 1.24, so astype(np.float) crashes on current NumPy; the builtin float
    # gives the same float64 dtype.
    self.U = np.zeros((rows, n_clusters), dtype=float)
    self.U[np.arange(rows), np.arange(rows) % n_clusters] = 1.0
    # Cluster centers start at the origin.
    self.C = np.zeros((n_clusters, cols), dtype=float)