本文整理汇总了Python中visualizer.Visualizer方法的典型用法代码示例。如果您正苦于以下问题:Python visualizer.Visualizer方法的具体用法?Python visualizer.Visualizer怎么用?Python visualizer.Visualizer使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块visualizer的用法示例。
在下文中一共展示了visualizer.Visualizer方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: initialize
# 需要导入模块: import visualizer [as 别名]
# 或者: from visualizer import Visualizer [as 别名]
def initialize(self, opt):
    """Remember the options and bring up the wrapped Visualizer with them."""
    self.opt = opt
    visual = Visualizer()
    visual.initialize(opt)
    self.visual = visual
示例2: gtsrb_visualize_label_scan_bottom_right_white_4
# 需要导入模块: import visualizer [as 别名]
# 或者: from visualizer import Visualizer [as 别名]
def gtsrb_visualize_label_scan_bottom_right_white_4():
    """Reverse-engineer a candidate trigger for every GTSRB label.

    Loads the test set and the trained model, builds a ``Visualizer``
    configured from the module-level hyperparameter constants, and runs
    ``visualize_trigger_w_mask`` once per class label (the known injected
    target ``Y_TARGET`` is scanned first).

    Returns:
        dict: mapping from target label (int) to the logs produced by
        ``visualize_trigger_w_mask`` for that label.
    """
    print('loading dataset')
    X_test, Y_test = load_dataset()
    # transform numpy arrays into data generator
    test_generator = build_data_loader(X_test, Y_test)

    print('loading model')
    model_file = '%s/%s' % (MODEL_DIR, MODEL_FILENAME)
    model = load_model(model_file)

    # initialize visualizer with the module-level hyperparameters
    visualizer = Visualizer(
        model, intensity_range=INTENSITY_RANGE, regularization=REGULARIZATION,
        input_shape=INPUT_SHAPE,
        init_cost=INIT_COST, steps=STEPS, lr=LR, num_classes=NUM_CLASSES,
        mini_batch=MINI_BATCH,
        upsample_size=UPSAMPLE_SIZE,
        attack_succ_threshold=ATTACK_SUCC_THRESHOLD,
        patience=PATIENCE, cost_multiplier=COST_MULTIPLIER,
        img_color=IMG_COLOR, batch_size=BATCH_SIZE, verbose=2,
        save_last=SAVE_LAST,
        early_stop=EARLY_STOP, early_stop_threshold=EARLY_STOP_THRESHOLD,
        early_stop_patience=EARLY_STOP_PATIENCE)

    log_mapping = {}

    # y_label list to analyze: scan the injected target label first,
    # then every remaining label
    y_target_list = list(range(NUM_CLASSES))
    y_target_list.remove(Y_TARGET)
    y_target_list = [Y_TARGET] + y_target_list
    for y_target in y_target_list:
        print('processing label %d' % y_target)
        _, _, logs = visualize_trigger_w_mask(
            visualizer, test_generator, y_target=y_target,
            save_pattern_flag=True)
        log_mapping[y_target] = logs

    # Fix: log_mapping was previously built and then silently discarded
    # (the function ended in a bare `pass`). Return it so callers can
    # analyze the per-label logs; existing callers that ignore the return
    # value are unaffected.
    return log_mapping
示例3: __init__
# 需要导入模块: import visualizer [as 别名]
# 或者: from visualizer import Visualizer [as 别名]
def __init__(self, rl_method='rl', stock_code=None,
             chart_data=None, training_data=None,
             min_trading_unit=1, max_trading_unit=2,
             delayed_reward_threshold=.05,
             net='dnn', num_steps=1, lr=0.001,
             value_network=None, policy_network=None,
             output_path='', reuse_models=True):
    """Set up a reinforcement-learning trading run.

    Args:
        rl_method: name of the RL technique to use (default 'rl').
        stock_code: identifier of the stock being traded.
        chart_data: price chart data passed to ``Environment``.
        training_data: feature rows aligned with the chart data; its
            column count is added to ``num_features`` when present.
        min_trading_unit: minimum units per trade; must be > 0.
        max_trading_unit: maximum units per trade; must be >= min.
        delayed_reward_threshold: threshold forwarded to ``Agent``.
        net: network architecture name (default 'dnn').
        num_steps: number of time steps fed to the network; must be > 0.
        lr: learning rate; must be > 0.
        value_network: optional pre-built value network to reuse.
        policy_network: optional pre-built policy network to reuse.
        output_path: directory for logs and other output files.
        reuse_models: whether to reuse existing saved models.

    Raises:
        AssertionError: if any of the numeric argument checks fail.
            NOTE(review): ``assert`` is stripped under ``python -O``;
            consider raising ValueError if validation must always run.
    """
    # validate arguments
    assert min_trading_unit > 0
    assert max_trading_unit > 0
    assert max_trading_unit >= min_trading_unit
    assert num_steps > 0
    assert lr > 0
    # reinforcement-learning technique
    self.rl_method = rl_method
    # environment setup
    self.stock_code = stock_code
    self.chart_data = chart_data
    self.environment = Environment(chart_data)
    # agent setup
    self.agent = Agent(self.environment,
                       min_trading_unit=min_trading_unit,
                       max_trading_unit=max_trading_unit,
                       delayed_reward_threshold=delayed_reward_threshold)
    # training data
    self.training_data = training_data
    self.sample = None
    self.training_data_idx = -1
    # feature-vector size = training-data width + agent state size
    self.num_features = self.agent.STATE_DIM
    if self.training_data is not None:
        self.num_features += self.training_data.shape[1]
    # neural-network settings
    self.net = net
    self.num_steps = num_steps
    self.lr = lr
    self.value_network = value_network
    self.policy_network = policy_network
    self.reuse_models = reuse_models
    # visualization module
    self.visualizer = Visualizer()
    # per-episode memories (samples, actions, rewards, network outputs, ...)
    self.memory_sample = []
    self.memory_action = []
    self.memory_reward = []
    self.memory_value = []
    self.memory_policy = []
    self.memory_pv = []
    self.memory_num_stocks = []
    self.memory_exp_idx = []
    self.memory_learning_idx = []
    # per-epoch counters
    self.loss = 0.
    self.itr_cnt = 0
    self.exploration_cnt = 0
    self.batch_size = 0
    self.learning_cnt = 0
    # output path for logs etc.
    self.output_path = output_path