This article collects typical usage examples of the Python method tflearn.is_training. If you have been wondering what tflearn.is_training does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tflearn module.
The following presents 14 code examples of tflearn.is_training, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
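Before diving into the examples, here is a minimal sketch of the basic pattern, assuming TensorFlow 1.x and the tiny illustrative network below (not taken from any example on this page). tflearn.is_training flips a boolean training-mode flag stored in the graph; layers such as dropout and batch normalization read that flag, so it is switched on around training ops and off again for evaluation or inference:

import tensorflow as tf
import tflearn

# A tiny network; dropout is only active while training mode is on.
net = tflearn.input_data(shape=[None, 10])
net = tflearn.fully_connected(net, 32, activation='relu')
net = tflearn.dropout(net, 0.5)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tflearn.is_training(True, session=sess)   # enable training mode
    # ... run training ops here ...
    tflearn.is_training(False, session=sess)  # back to inference mode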
Example 1: evaluate_flow
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def evaluate_flow(session, ops_to_evaluate, dataflow):
    if not isinstance(ops_to_evaluate, list):
        ops_to_evaluate = [ops_to_evaluate]
    tflearn.is_training(False, session)
    dataflow.reset()
    dataflow.start()
    res = [0. for i in ops_to_evaluate]
    feed_batch = dataflow.next()

    while feed_batch:
        r = session.run(ops_to_evaluate, feed_batch)
        current_batch_size = get_current_batch_size(feed_batch, dataflow)
        for i in range(len(r)):
            res[i] += r[i] * current_batch_size
        feed_batch = dataflow.next()
    res = [r / dataflow.n_samples for r in res]
    return res
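Note how each per-batch result is weighted by the actual batch size before the final division by dataflow.n_samples: the mean stays exact even when the last batch is smaller than the others.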
Example 2: act
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def act(self, test=False):
    with self.sess.as_default():
        print('--- Selecting action, test={}'.format(test))
        obs = np.expand_dims(self.observation, axis=0)

        if FLAGS.icnn_opt == 'adam':
            f = self._fg_entr
            # f = self._fg
        elif FLAGS.icnn_opt == 'bundle_entropy':
            f = self._fg
        else:
            raise RuntimeError("Unrecognized ICNN optimizer: " + FLAGS.icnn_opt)

        tflearn.is_training(False)
        action = self.opt(f, obs)
        tflearn.is_training(not test)

        if not test:
            self.noise -= FLAGS.outheta * self.noise - \
                          FLAGS.ousigma * npr.randn(self.dimA)
            action += self.noise
        action = np.clip(action, -1, 1)

        self.action = np.atleast_1d(np.squeeze(action, axis=0))
        return self.action
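Training mode is switched off while self.opt runs the inner optimization over actions, so stochastic layers behave deterministically, and is then restored to `not test`; in the training case, what appears to be Ornstein-Uhlenbeck exploration noise (note the outheta and ousigma flags) is added to the chosen action.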
Example 3: train
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def train(self):
    with self.sess.as_default():
        obs, act, rew, ob2, term2, info = self.rm.minibatch(size=FLAGS.bsize)
        if FLAGS.icnn_opt == 'adam':
            # f = self._opt_train_entr
            f = self._fg_entr_target
            # f = self._fg_target
        elif FLAGS.icnn_opt == 'bundle_entropy':
            f = self._fg_target
        else:
            raise RuntimeError("Unrecognized ICNN optimizer: " + FLAGS.icnn_opt)

        print('--- Optimizing for training')
        tflearn.is_training(False)
        act2 = self.opt(f, ob2)
        tflearn.is_training(True)

        _, _, loss = self._train(obs, act, rew, ob2, act2, term2,
                                 log=FLAGS.summary, global_step=self.t)

        self.sess.run(self.proj)
        return loss
Example 4: log_between_steps
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def log_between_steps(self, step):
    fast_train = self.train_config["fast_train"]
    tflearn.is_training(False, self._agent.session)

    summary, v_pv, v_log_mean, v_loss, log_mean_free, weights = \
        self._evaluate("test", self.summary,
                       self._agent.portfolio_value,
                       self._agent.log_mean,
                       self._agent.loss,
                       self._agent.log_mean_free,
                       self._agent.portfolio_weights)
    self.test_writer.add_summary(summary, step)

    if not fast_train:
        summary, loss_value = self._evaluate("training", self.summary, self._agent.loss)
        self.train_writer.add_summary(summary, step)

    # print 'output is %s' % out
    logging.info('=' * 30)
    logging.info('step %d' % step)
    logging.info('-' * 30)
    if not fast_train:
        logging.info('training loss is %s\n' % loss_value)
    logging.info('the portfolio value on test set is %s\nlog_mean is %s\n'
                 'loss_value is %.3f\nlog mean without commission fee is %.3f\n' %
                 (v_pv, v_log_mean, v_loss, log_mean_free))
    logging.info('=' * 30 + "\n")

    if not self.__snap_shot:
        self._agent.save_model(self.save_path)
    elif v_pv > self.best_metric:
        self.best_metric = v_pv
        logging.info("get better model at %s steps,"
                     " whose test portfolio value is %s" % (step, v_pv))
        if self.save_path:
            self._agent.save_model(self.save_path)
    self.check_abnormal(v_pv, weights)
Example 5: __log_result_csv
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def __log_result_csv(self, index, time):
    from pgportfolio.trade import backtest
    dataframe = None
    csv_dir = './train_package/train_summary.csv'
    tflearn.is_training(False, self._agent.session)
    v_pv, v_log_mean, benefit_array, v_log_mean_free = \
        self._evaluate("test",
                       self._agent.portfolio_value,
                       self._agent.log_mean,
                       self._agent.pv_vector,
                       self._agent.log_mean_free)

    backtest = backtest.BackTest(self.config.copy(),
                                 net_dir=None,
                                 agent=self._agent)
    backtest.start_trading()

    result = Result(test_pv=[v_pv],
                    test_log_mean=[v_log_mean],
                    test_log_mean_free=[v_log_mean_free],
                    test_history=[''.join(str(e) + ', ' for e in benefit_array)],
                    config=[json.dumps(self.config)],
                    net_dir=[index],
                    backtest_test_pv=[backtest.test_pv],
                    backtest_test_history=[''.join(str(e) + ', ' for e in backtest.test_pc_vector)],
                    backtest_test_log_mean=[np.mean(np.log(backtest.test_pc_vector))],
                    training_time=int(time))
    new_data_frame = pd.DataFrame(result._asdict()).set_index("net_dir")
    if os.path.isfile(csv_dir):
        dataframe = pd.read_csv(csv_dir).set_index("net_dir")
        dataframe = dataframe.append(new_data_frame)
    else:
        dataframe = new_data_frame
    if int(index) > 0:
        dataframe.to_csv(csv_dir)
    return result
Example 6: __rolling_logging
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def __rolling_logging(self):
    fast_train = self.train_config["fast_train"]
    if not fast_train:
        tflearn.is_training(False, self._agent.session)
        v_pv, v_log_mean = self._evaluate("validation",
                                          self._agent.portfolio_value,
                                          self._agent.log_mean)
        t_pv, t_log_mean = self._evaluate("test", self._agent.portfolio_value, self._agent.log_mean)
        loss_value = self._evaluate("training", self._agent.loss)

        logging.info('training loss is %s\n' % loss_value)
        logging.info('the portfolio value on validation asset is %s\nlog_mean is %s\n' %
                     (v_pv, v_log_mean))
        logging.info('the portfolio value on test asset is %s\n mean is %s' % (t_pv, t_log_mean))
Example 7: train
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def train(self, x, y, last_w, setw):
    tflearn.is_training(True, self.__net.session)
    self.evaluate_tensors(x, y, last_w, setw, [self.__train_operation])
Example 8: decide_by_history
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def decide_by_history(self, history, last_w):
    assert isinstance(history, np.ndarray), \
        "the history should be a numpy array, not %s" % type(history)
    assert not np.any(np.isnan(last_w))
    assert not np.any(np.isnan(history))
    tflearn.is_training(False, self.session)
    history = history[np.newaxis, :, :, :]
    return np.squeeze(self.session.run(self.__net.output,
                                       feed_dict={self.__net.input_tensor: history,
                                                  self.__net.previous_w: last_w[np.newaxis, 1:],
                                                  self.__net.input_num: 1}))
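The slice last_w[np.newaxis, 1:] feeds the network all but the first element of the previous weight vector; in PGPortfolio the first element of the portfolio vector is the cash position, so presumably only the non-cash weights are passed to the network.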
Example 9: _train_batch
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def _train_batch(self, feed_dict):
    """ _train_batch.

    Train on a single batch.

    Arguments:
        feed_dict: `dict`. The data dictionary to feed.
    """
    tflearn.is_training(True, session=self.session)
    _, loss, _ = self.session.run([self.train, self.loss, self.summ_op],
                                  feed_dict=feed_dict)
    tflearn.is_training(False, session=self.session)
    return loss
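This is the canonical wrap pattern that recurs throughout these examples: turn training mode on immediately before running the training op, then turn it back off so that any later evaluation or prediction runs in inference mode.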
Example 10: evaluate
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def evaluate(session, op_to_evaluate, feed_dict, batch_size):
    """ evaluate.

    Evaluate an operation with a provided data dict, using a batch size
    to save GPU memory.

    Args:
        session: `tf.Session`. Session for running operations.
        op_to_evaluate: `tf.Op`. Operation to be evaluated.
        feed_dict: `dict`. Data dictionary to feed op_to_evaluate.
        batch_size: `int`. Batch size to be used for evaluation.

    Returns:
        `float`. op_to_evaluate mean over all batches.
    """
    tflearn.is_training(False, session)
    n_test_samples = len(get_dict_first_element(feed_dict))
    batches = make_batches(n_test_samples, batch_size)
    index_array = np.arange(n_test_samples)
    avg = 0.0
    for i, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        feed_batch = {}
        for key in feed_dict:
            # Make batch for multi-dimensional data
            if np.ndim(feed_dict[key]) > 0:
                feed_batch[key] = slice_array(feed_dict[key], batch_ids)
            else:
                feed_batch[key] = feed_dict[key]
        avg += session.run(op_to_evaluate, feed_batch) / len(batches)
    return avg
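A hypothetical call might look like the following, where X, Y, and accuracy_op are placeholder and tensor names assumed purely for illustration:

mean_acc = evaluate(session, accuracy_op, {X: testX, Y: testY}, batch_size=128)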
Example 11: predict
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def predict(self, feed_dict):
    """ predict.

    Run data through the provided network and return the result value.

    Arguments:
        feed_dict: `dict`. Feed data dictionary, with placeholders as
            keys, and data as values.

    Returns:
        An `array`. In case of multiple tensors to predict, each tensor's
        prediction result is concatenated.
    """
    with self.graph.as_default():
        # Data Preprocessing
        dprep_dict = dict()
        for i in range(len(self.inputs)):
            # Support for custom inputs not using dprep/daug
            if len(self.dprep_collection) > i:
                if self.dprep_collection[i] is not None:
                    dprep_dict[self.inputs[i]] = self.dprep_collection[i]
        # Apply pre-processing
        if len(dprep_dict) > 0:
            for k in dprep_dict:
                feed_dict[k] = dprep_dict[k].apply(feed_dict[k])

        # Prediction for each tensor
        tflearn.is_training(False, self.session)
        prediction = []
        if len(self.tensors) == 1:
            return self.session.run(self.tensors[0], feed_dict=feed_dict)
        else:
            for output in self.tensors:
                o_pred = self.session.run(output, feed_dict=feed_dict).tolist()
                for i, val in enumerate(o_pred):  # Reshape pred per sample
                    if len(self.tensors) > 1:
                        if not len(prediction) > i:
                            prediction.append([])
                        prediction[i].append(val)
            return prediction
Example 12: evaluate
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def evaluate(self, feed_dict, ops, batch_size=128):
    """ Evaluate.

    Evaluate a list of tensors over a whole dataset. Generally, the
    tensors in 'ops' are average performance metrics (such as mean
    accuracy, top-3 accuracy, etc.).

    Arguments:
        feed_dict: `dict`. The feed dictionary of data.
        ops: list of `Tensors`. The tensors to evaluate.
        batch_size: `int`. A batch size.

    Returns:
        The mean average result per tensor over all batches.
    """
    tflearn.is_training(False, self.session)
    coord = tf.train.Coordinator()
    inputs = tf.get_collection(tf.GraphKeys.INPUTS)
    # Data Preprocessing
    dprep_dict = {}
    dprep_collection = tf.get_collection(tf.GraphKeys.DATA_PREP)
    for i in range(len(inputs)):
        # Support for custom inputs not using dprep/daug
        if len(dprep_collection) > i:
            if dprep_collection[i] is not None:
                dprep_dict[inputs[i]] = dprep_collection[i]
    # Data Flow
    df = data_flow.FeedDictFlow(feed_dict, coord,
                                batch_size=batch_size,
                                dprep_dict=dprep_dict,
                                daug_dict=None,
                                index_array=None,
                                num_threads=1)

    return evaluate_flow(self.session, ops, df)
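This method simply wires the feed dictionary into a FeedDictFlow and delegates the batched evaluation to evaluate_flow, the helper shown in Example 1.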
Example 13: validate
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def validate(self, sess, generator, keys=None, summary=False, predict=False, show_tqdm=False):
    if keys is None:
        keys = ['dice_score', 'landmark_dist', 'pt_mask', 'jacc_score']
        # if self.segmentation_class_value is not None:
        #     for k in self.segmentation_class_value:
        #         keys.append('jacc_{}'.format(k))
    full_results = dict([(k, list()) for k in keys])
    if not summary:
        full_results['id1'] = []
        full_results['id2'] = []
    if predict:
        full_results['seg1'] = []
        full_results['seg2'] = []
        full_results['img1'] = []
        full_results['img2'] = []
    tflearn.is_training(False, sess)
    if show_tqdm:
        generator = tqdm(generator)
    for fd in generator:
        id1 = fd.pop('id1')
        id2 = fd.pop('id2')
        results = sess.run(self.get_predictions(*keys),
                           feed_dict=set_tf_keys(fd))
        if not summary:
            results['id1'] = id1
            results['id2'] = id2
        if predict:
            results['seg1'] = fd['seg1']
            results['seg2'] = fd['seg2']
            results['img1'] = fd['voxel1']
            results['img2'] = fd['voxel2']
        mask = np.where([i and j for i, j in zip(id1, id2)])
        for k, v in results.items():
            full_results[k].append(v[mask])
    if 'landmark_dist' in full_results and 'pt_mask' in full_results:
        pt_mask = full_results.pop('pt_mask')
        full_results['landmark_dist'] = [arr * mask for arr, mask
                                         in zip(full_results['landmark_dist'], pt_mask)]
    for k in full_results:
        full_results[k] = np.concatenate(full_results[k], axis=0)
        if summary:
            full_results[k] = full_results[k].mean()
    return full_results
Example 14: train
# Required import: import tflearn [as alias]
# Or: from tflearn import is_training [as alias]
def train(self, args, dataX, dataY):
    save = os.path.join(os.path.expanduser(args.save),
                        "{}.{}".format(args.model, args.dataset))

    nTrain = dataX.shape[0]

    imgDir = os.path.join(save, 'imgs')
    if not os.path.exists(imgDir):
        os.makedirs(imgDir)

    trainFields = ['iter', 'loss']
    trainF = open(os.path.join(save, 'train.csv'), 'w')
    trainW = csv.writer(trainF)
    trainW.writerow(trainFields)

    self.trainWriter = tf.train.SummaryWriter(os.path.join(save, 'train'),
                                              self.sess.graph)
    self.sess.run(tf.initialize_all_variables())
    if not args.noncvx:
        self.sess.run(self.makeCvx)

    nParams = np.sum(v.get_shape().num_elements() for v in tf.trainable_variables())

    meta = {'nTrain': nTrain, 'nParams': nParams, 'nEpoch': args.nEpoch}
    metaP = os.path.join(save, 'meta.json')
    with open(metaP, 'w') as f:
        json.dump(meta, f, indent=2)

    bestMSE = None
    for i in range(args.nEpoch):
        tflearn.is_training(True)
        print("=== Epoch {} ===".format(i))
        start = time.time()

        y0 = np.full(dataY.shape, 0.5)
        _, trainMSE, yn = self.sess.run(
            [self.train_step, self.mse_, self.yn_],
            feed_dict={self.x_: dataX, self.y0_: y0, self.trueY_: dataY})

        if not args.noncvx and len(self.proj) > 0:
            self.sess.run(self.proj)

        trainW.writerow((i, trainMSE))
        trainF.flush()

        print(" + loss: {:0.5e}".format(trainMSE))
        print(" + time: {:0.2f} s".format(time.time() - start))

        if i % 10 == 0:
            loc = "{}/{:05d}".format(imgDir, i)
            self.plot(loc, dataX, dataY)

        if bestMSE is None or trainMSE < bestMSE:
            loc = os.path.join(save, 'best')
            self.plot(loc, dataX, dataY)
            bestMSE = trainMSE

    trainF.close()