This article collects typical usage examples of the Python attribute `attrdict.AttrDict.n_hidden_1`. If you are wondering what `AttrDict.n_hidden_1` is, how it is used, or want to see it in real code, the curated examples below may help. You can also explore further usage examples of the containing class, `attrdict.AttrDict`.
Two code examples using `AttrDict.n_hidden_1` are shown below, sorted by popularity by default.
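Before the examples, note what `AttrDict` itself provides: it wraps a mapping so that its keys are also reachable as attributes, which is why the code below can write `c.n_hidden_1 = c.n_hidden` instead of `c['n_hidden_1'] = c['n_hidden']`. A minimal sketch (the key names are illustrative, not part of the AttrDict API):

from attrdict import AttrDict

c = AttrDict()
c.n_hidden = 2048               # attribute-style write creates the key...
c.n_hidden_1 = c.n_hidden       # ...and attribute-style read retrieves it
assert c['n_hidden_1'] == 2048  # the plain mapping interface sees the same data

So `n_hidden_1` is not a method defined by AttrDict; it is simply an attribute that the DeepSpeech configuration code stores on an AttrDict instance.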
Example 1: initialize_globals
# Required import: from attrdict import AttrDict
# Imports needed to run this example. The helpers FLAGS, get_available_gpus,
# Alphabet, log_error and ConfigSingleton come from the surrounding DeepSpeech
# code base and are not shown here; xdg refers to the pyxdg package.
import os
import sys

import tensorflow as tf
from attrdict import AttrDict
from xdg import BaseDirectory as xdg
def initialize_globals():
    c = AttrDict()

    # CPU device
    c.cpu_device = '/cpu:0'

    # Available GPU devices
    c.available_devices = get_available_gpus()

    # If there is no GPU available, we fall back to CPU based operation
    if not c.available_devices:
        c.available_devices = [c.cpu_device]

    # Set default dropout rates
    if FLAGS.dropout_rate2 < 0:
        FLAGS.dropout_rate2 = FLAGS.dropout_rate
    if FLAGS.dropout_rate3 < 0:
        FLAGS.dropout_rate3 = FLAGS.dropout_rate
    if FLAGS.dropout_rate6 < 0:
        FLAGS.dropout_rate6 = FLAGS.dropout_rate

    # Set default checkpoint dir
    if not FLAGS.checkpoint_dir:
        FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints'))

    if FLAGS.load not in ['last', 'best', 'init', 'auto']:
        FLAGS.load = 'auto'

    # Set default summary dir
    if not FLAGS.summary_dir:
        FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))

    # Standard session configuration that'll be used for all new sessions.
    c.session_config = tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=FLAGS.log_placement,
                                      inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
                                      intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads)

    c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))

    # Geometric Constants
    # ===================
    # For an explanation of the meaning of the geometric constants, please refer to
    # doc/Geometry.md

    # Number of MFCC features
    c.n_input = 26  # TODO: Determine this programmatically from the sample rate

    # The number of frames in the context
    c.n_context = 9  # TODO: Determine the optimal value using a validation data set

    # Number of units in hidden layers
    c.n_hidden = FLAGS.n_hidden
    c.n_hidden_1 = c.n_hidden
    c.n_hidden_2 = c.n_hidden
    c.n_hidden_5 = c.n_hidden

    # LSTM cell state dimension
    c.n_cell_dim = c.n_hidden

    # The number of units in the third layer, which feeds in to the LSTM
    c.n_hidden_3 = c.n_cell_dim

    # Units in the sixth layer = number of characters in the target language plus one
    c.n_hidden_6 = c.alphabet.size() + 1  # +1 for CTC blank label

    # Size of audio window in samples
    c.audio_window_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_len / 1000)

    # Stride for feature computations in samples
    c.audio_step_samples = FLAGS.audio_sample_rate * (FLAGS.feature_win_step / 1000)

    if FLAGS.one_shot_infer and not os.path.exists(FLAGS.one_shot_infer):
        log_error('Path specified in --one_shot_infer is not a valid file.')
        sys.exit(1)

    ConfigSingleton._config = c  # pylint: disable=protected-access
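With the globals initialized, the surrounding DeepSpeech code reads the values back through `ConfigSingleton`, whose attribute lookups are forwarded to the `AttrDict` stored in `_config`. A hedged usage sketch — the module-level `Config` accessor is an assumption about the surrounding code base, which is not shown above:

initialize_globals()
Config = ConfigSingleton()  # assumed accessor; attribute access delegates to ConfigSingleton._config
print(Config.n_hidden_1)    # the AttrDict value set above, i.e. FLAGS.n_hidden

As a side note on the audio geometry: with typical DeepSpeech defaults (16 kHz sample rate, a 32 ms feature window, a 20 ms step — assumed values), `audio_window_samples` works out to 16000 * (32 / 1000) = 512 samples and `audio_step_samples` to 16000 * (20 / 1000) = 320 samples.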
Example 2: initialize_globals
# Required import: from attrdict import AttrDict
# Imports needed to run this example. FLAGS, get_available_gpus and Alphabet
# again come from the surrounding DeepSpeech code base; xdg is the pyxdg package.
import os

import tensorflow as tf
from attrdict import AttrDict
from xdg import BaseDirectory as xdg
def initialize_globals():
    c = AttrDict()

    # ps and worker hosts required for p2p cluster setup
    FLAGS.ps_hosts = list(filter(len, FLAGS.ps_hosts.split(',')))
    FLAGS.worker_hosts = list(filter(len, FLAGS.worker_hosts.split(',')))

    # Create a cluster from the parameter server and worker hosts.
    c.cluster = tf.train.ClusterSpec({'ps': FLAGS.ps_hosts, 'worker': FLAGS.worker_hosts})

    # The absolute number of computing nodes - regardless of cluster or single mode
    num_workers = max(1, len(FLAGS.worker_hosts))

    # If replica numbers are negative, we multiply their absolute values with the number of workers
    if FLAGS.replicas < 0:
        FLAGS.replicas = num_workers * -FLAGS.replicas
    if FLAGS.replicas_to_agg < 0:
        FLAGS.replicas_to_agg = num_workers * -FLAGS.replicas_to_agg

    # The device path base for this node
    c.worker_device = '/job:%s/task:%d' % (FLAGS.job_name, FLAGS.task_index)

    # This node's CPU device
    c.cpu_device = c.worker_device + '/cpu:0'

    # This node's available GPU devices
    c.available_devices = [c.worker_device + gpu for gpu in get_available_gpus()]

    # If there is no GPU available, we fall back to CPU based operation
    if not c.available_devices:
        c.available_devices = [c.cpu_device]

    # Set default dropout rates
    if FLAGS.dropout_rate2 < 0:
        FLAGS.dropout_rate2 = FLAGS.dropout_rate
    if FLAGS.dropout_rate3 < 0:
        FLAGS.dropout_rate3 = FLAGS.dropout_rate
    if FLAGS.dropout_rate6 < 0:
        FLAGS.dropout_rate6 = FLAGS.dropout_rate

    # Set default checkpoint dir
    if not FLAGS.checkpoint_dir:
        FLAGS.checkpoint_dir = xdg.save_data_path(os.path.join('deepspeech', 'checkpoints'))

    # Set default summary dir
    if not FLAGS.summary_dir:
        FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))

    # Standard session configuration that'll be used for all new sessions.
    c.session_config = tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=FLAGS.log_placement,
                                      inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
                                      intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads)

    c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))

    # Geometric Constants
    # ===================
    # For an explanation of the meaning of the geometric constants, please refer to
    # doc/Geometry.md

    # Number of MFCC features
    c.n_input = 26  # TODO: Determine this programmatically from the sample rate

    # The number of frames in the context
    c.n_context = 9  # TODO: Determine the optimal value using a validation data set

    # Number of units in hidden layers
    c.n_hidden = FLAGS.n_hidden
    c.n_hidden_1 = c.n_hidden
    c.n_hidden_2 = c.n_hidden
    c.n_hidden_5 = c.n_hidden

    # LSTM cell state dimension
    c.n_cell_dim = c.n_hidden

    # The number of units in the third layer, which feeds in to the LSTM
    c.n_hidden_3 = c.n_cell_dim

    # Units in the sixth layer = number of characters in the target language plus one
    c.n_hidden_6 = c.alphabet.size() + 1  # +1 for CTC blank label

    # Queues that are used to gracefully stop parameter servers.
    # Each queue stands for one ps. A finishing worker sends a token to each queue before joining/quitting.
    # Each ps will dequeue as many tokens as there are workers before joining/quitting.
    # This ensures parameter servers won't quit if they are still required by at least one worker,
    # and also won't wait forever (as a plain `server.join()` would).
    done_queues = []
    for i, ps in enumerate(FLAGS.ps_hosts):
        # Queues are hosted by their respective owners
        with tf.device('/job:ps/task:%d' % i):
            done_queues.append(tf.FIFOQueue(1, tf.int32, shared_name=('queue%i' % i)))

    # Placeholder to pass in the worker's index as token
    c.token_placeholder = tf.placeholder(tf.int32)

    # Enqueue operations for each parameter server
    #......... the remainder of this code is omitted .........
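To make the shutdown protocol from the comments concrete, the omitted part plausibly continues along these lines — the names `done_enqueues` and `server`, and the wiring itself, are illustrative rather than quoted from the hidden code:

# One enqueue op per parameter server; a finishing worker runs all of them:
done_enqueues = [queue.enqueue(c.token_placeholder) for queue in done_queues]
# e.g. inside a worker, once training is done:
#   for enqueue in done_enqueues:
#       session.run(enqueue, feed_dict={c.token_placeholder: FLAGS.task_index})
# A parameter server, instead of blocking forever in server.join(), dequeues
# one token per worker from its own queue and then shuts down:
#   with tf.Session(server.target) as session:
#       for _ in range(len(FLAGS.worker_hosts)):
#           session.run(done_queues[FLAGS.task_index].dequeue())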