本文整理匯總了Python中utils.set_logger方法的典型用法代碼示例。如果您正苦於以下問題:Python utils.set_logger方法的具體用法?Python utils.set_logger怎麽用?Python utils.set_logger使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊utils的用法示例。
在下文中一共展示了utils.set_logger方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import set_logger [as 別名]
def __init__(self, timestep, window, batch_size, vocab_size, paramSavePath, logPath, input_dim, hidden_size, keep_prob, L, timestr, debug):
    """Configure the generator: store hyperparameters, open a logger, and
    initialize model parameters.

    When ``debug`` is truthy the batch size is forced to 10 so the code
    path can be exercised quickly on a tiny workload.
    """
    self.name = 'g'
    # Core model dimensions.
    self.timestep = timestep
    self.hidden_size = hidden_size
    self.input_dim = input_dim
    self.window = window
    self.keep_prob = keep_prob
    # Mirrors options['L'] in the reference implementation; kept for
    # numerical stability (the rationale is not documented upstream).
    self.L = L
    # Locations for parameter checkpoints and log files.
    self.paramSavePath = paramSavePath
    self.logPath = logPath
    self.timestr = timestr
    self.vocab_size = vocab_size
    # Tiny fixed batch while debugging, the configured size otherwise.
    self.batch_size = 10 if debug else batch_size
    self.logger = set_logger(self.logPath, self.timestr, os.path.basename(__file__))
    self.init_param()
示例2: __init__
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import set_logger [as 別名]
def __init__(self, dataPath, savePath, paramSavePath, logPath, debug, split_percent, batch_size, timestr, timestep, window):
    """Load, clean, index, and split the text corpus.

    * dataPath locates the corpus. Two data files exist: the full-size
      corpus described in the paper, and a much smaller 100-sentence
      sample (from the arXiv and book datasets) used for early code tests.
    * debug selects code-test mode (default True in the original project):
      the small sample file is read and the batch size is forced to 10.
    * split_percent gives the training : validation : testing ratio.
    """
    self.debug = debug
    self.savePath = savePath
    # Debug mode always reads the small pre-canned sample file.
    self.dataPath = '../data/data_pre.txt' if self.debug else dataPath
    self.paramSavePath = paramSavePath
    self.logger = set_logger(logPath, timestr, os.path.basename(__file__))
    self.split_percent = split_percent
    self.timestep = timestep
    self.window = window
    # Populates self.data (list of all file contents) and self.sentSize
    # (number of sentences).
    self.load_data()
    self.clean_str()
    # Builds self.dataArr (np.ndarray view of self.data), self.mapToNum
    # (word -> index map, looked up as self.mapToNum['word']),
    # self.dataNum (corpus mapped to indices, np.ndarray) and
    # self.vocabSize (vocabulary size).
    self.word2num()
    # Splits self.dataNum into self.train / self.validation / self.test.
    # (self.shift() would rotate the first 10% of self.dataNum and
    # re-split the three sets.)
    self.split_tvt()
    self.batch_size = 10 if self.debug else batch_size
示例3: __init__
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import set_logger [as 別名]
def __init__(self, window, vocab_size, paramSavePath, logPath, input_dim, keep_prob, reuse, generator, timestr, debug):
    """Build the discriminator: a word-embedding matrix plus one CNN
    filter (W, b) per entry of ``window``.

    When ``reuse`` is true the embedding matrix is shared with the given
    ``generator``; otherwise a fresh one is created here.
    NOTE(review): this snippet appears truncated by the source site —
    W and b are created but never used in the visible lines, and the
    ``debug`` argument is not read here; confirm against the full file.
    """
    # params = {'lambda_r': 0.001, 'lambda_m': 0.001, 'word_dim': 300}
    self.name = 'd'
    # window: iterable of CNN filter heights (one filter per entry).
    self.window = window
    self.vocab_size = vocab_size
    self.input_dim = input_dim
    # Locations for parameter checkpoints and log files.
    self.paramSavePath = paramSavePath
    self.logPath = logPath
    self.timestr = timestr
    #self.cnn_out = tf.get_variable(name=self.name + '_f',
    #                               shape=[],
    #                               initializer=tf.zeros_initializer())
    self.keep_prob = keep_prob
    self.logger = set_logger(self.logPath, self.timestr, os.path.basename(__file__))
    # Share the embedding table with the generator, or create our own
    # (vocab_size x input_dim, uniformly initialized).
    if reuse:
        self.Wemb = generator.Wemb
    else:
        self.Wemb = tf.get_variable(name=self.name + '_Wemb', shape=[self.vocab_size, self.input_dim],
                                    dtype=tf.float32, initializer=tf.random_uniform_initializer())
    with tf.variable_scope('d'):
        # One conv filter W (height n) and bias b per window size.
        for i, n in enumerate(self.window):
            W = tf.get_variable(name=self.name + '_W' + str(i),
                                shape=[n, 1, 1, 1],
                                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.get_variable(name=self.name + '_b' + str(i),
                                shape=[1],
                                initializer=tf.zeros_initializer())
            #c = tf.get_variable(name=self.name + '_c' + str(i), # c is each cnn_out
            #                    shape=[-1, self.input_dim],
            #                    initializer=tf.zeros_initializer())
開發者ID:Jeff-HOU,項目名稱:UROP-Adversarial-Feature-Matching-for-Text-Generation,代碼行數:32,代碼來源:discriminator.py