

Python sonnet.BatchFlatten Method Code Examples

This article collects typical usage examples of the sonnet.BatchFlatten method in Python. If you are wondering what sonnet.BatchFlatten does or how to use it, the curated examples below should help. You can also explore further usage examples from the sonnet package.


The following 15 code examples of the sonnet.BatchFlatten method are shown below, sorted by popularity by default.
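Before the examples, a quick orientation: snt.BatchFlatten reshapes its input so that every dimension except the leading batch dimension is flattened into one. A minimal sketch of this behaviour, assuming TensorFlow 1.x and Sonnet 1.x (which all the examples below use):

import sonnet as snt
import tensorflow as tf

# A batch of 8 "images", shape [batch, height, width, channels].
x = tf.zeros([8, 28, 28, 3])

# BatchFlatten keeps the leading batch dimension and flattens the rest:
# [8, 28, 28, 3] -> [8, 28 * 28 * 3] = [8, 2352].
flat = snt.BatchFlatten()(x)
print(flat.shape)  # (8, 2352)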

Example 1: custom_build

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def custom_build(inputs, is_training, keep_prob):
  """A custom build method to wrap into a sonnet Module."""
  x_inputs = tf.reshape(inputs, [-1, 28, 28, 1])
  outputs = snt.Conv2D(output_channels=32, kernel_shape=4, stride=2)(x_inputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=64, kernel_shape=4, stride=2)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=1024, kernel_shape=1, stride=1)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = snt.BatchFlatten()(outputs)
  outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
  outputs = snt.Linear(output_size=10)(outputs)
#  _activation_summary(outputs)
  return outputs 
Author: normanheckscher, Project: mnist-multi-gpu, Lines: 23, Source: mnist_multi_gpu_sonnet.py
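As the docstring notes, custom_build is meant to be wrapped into a Sonnet module. A hedged sketch of how that wrapping might look in Sonnet 1.x, where snt.Module accepts a build callable; the placeholder shapes here are assumptions inferred from the [-1, 28, 28, 1] reshape above:

import sonnet as snt
import tensorflow as tf

# Hypothetical placeholders; shapes inferred from the reshape in custom_build.
inputs = tf.placeholder(tf.float32, [None, 784])
is_training = tf.placeholder(tf.bool)
keep_prob = tf.placeholder(tf.float32)

# snt.Module turns a plain build function into a reusable module with
# proper variable sharing on repeated calls.
model = snt.Module(lambda x: custom_build(x, is_training, keep_prob))
logits = model(inputs)  # shape [None, 10]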

Example 2: custom_build

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def custom_build(self, inputs):
        """A custom build method to wrap into a sonnet Module."""
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[7, 7], stride=[1, 1])(inputs)
        outputs = tf.nn.relu(outputs)
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
        outputs = tf.nn.relu(outputs)
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
        outputs = tf.nn.relu(outputs)
        outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[2, 2])(outputs)
        outputs = tf.nn.relu(outputs)
        outputs = tf.nn.dropout(outputs,  self.placeholders['keep_prob'])
        outputs = snt.BatchFlatten()(outputs)
        outputs = snt.Linear(128)(outputs)
        outputs = tf.nn.relu(outputs)

        return outputs 
Author: tu-rbo, Project: differentiable-particle-filters, Lines: 18, Source: dpf_kitti.py

Example 3: _torso

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def _torso(self, input_):
    """Processing of all the visual and language inputs to the LSTM core."""

    # Extract the inputs
    last_action, env_output = input_
    last_reward, _, _, observation = env_output
    frame = observation[self._idx_frame]
    goal = observation[self._idx_goal]
    goal = tf.to_float(goal)

    # Convert the image to floats and normalise.
    frame = tf.to_float(frame)
    frame = snt.FlattenTrailingDimensions(dim_from=3)(frame)
    frame /= 255.0

    # Feed image through convnet.
    with tf.variable_scope('convnet'):
      # Convolutional layers.
      conv_out = self._convnet(frame)
      # Fully connected layer.
      conv_out = snt.BatchFlatten()(conv_out)
      conv_out = snt.Linear(256)(conv_out)
      conv_out = tf.nn.relu(conv_out)

    # Concatenate outputs of the visual and instruction pathways.
    if self._feed_action_and_reward:
      # Append clipped last reward and one hot last action.
      tf.logging.info('Append last reward clipped to: %f', self._max_reward)
      clipped_last_reward = tf.expand_dims(
          tf.clip_by_value(last_reward, -self._max_reward, self._max_reward),
          -1)
      tf.logging.info('Append last action (one-hot of %d)', self._num_actions)
      one_hot_last_action = tf.one_hot(last_action, self._num_actions)
      tf.logging.info('Append goal:')
      tf.logging.info(goal)
      action_and_reward = tf.concat([clipped_last_reward, one_hot_last_action],
                                    axis=1)
    else:
      action_and_reward = tf.constant([0], dtype=tf.float32)
    return conv_out, action_and_reward, goal 
Author: deepmind, Project: streetlearn, Lines: 42, Source: goal_nav_agent.py

Example 4: _build

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def _build(self, inputs):

    if FLAGS.l2_reg:
      regularizers = {'w': lambda w: FLAGS.l2_reg*tf.nn.l2_loss(w),
                      'b': lambda w: FLAGS.l2_reg*tf.nn.l2_loss(w),}
    else:
      regularizers = None

    reshape = snt.BatchReshape([28, 28, 1])

    conv = snt.Conv2D(2, 5, padding=snt.SAME, regularizers=regularizers)
    act = _NONLINEARITY(conv(reshape(inputs)))

    pool = tf.nn.pool(act, window_shape=(2, 2), pooling_type=_POOL,
                      padding=snt.SAME, strides=(2, 2))

    conv = snt.Conv2D(4, 5, padding=snt.SAME, regularizers=regularizers)
    act = _NONLINEARITY(conv(pool))

    pool = tf.nn.pool(act, window_shape=(2, 2), pooling_type=_POOL,
                      padding=snt.SAME, strides=(2, 2))

    flatten = snt.BatchFlatten()(pool)

    linear = snt.Linear(32, regularizers=regularizers)(flatten)

    return snt.Linear(10, regularizers=regularizers)(linear) 
Author: tensorflow, Project: kfac, Lines: 29, Source: classifier_mnist.py

Example 5: test_train

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def test_train(self):
    image = tf.random_uniform(shape=(_BATCH_SIZE, 784), maxval=1.)
    labels = tf.random_uniform(shape=(_BATCH_SIZE,), maxval=10, dtype=tf.int32)
    labels_one_hot = tf.one_hot(labels, 10)

    model = snt.Sequential([snt.BatchFlatten(), snt.nets.MLP([128, 128, 10])])
    logits = model(image)
    all_losses = tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=labels_one_hot)
    loss = tf.reduce_mean(all_losses)
    layers = layer_collection.LayerCollection()
    optimizer = periodic_inv_cov_update_kfac_opt.PeriodicInvCovUpdateKfacOpt(
        invert_every=10,
        cov_update_every=1,
        learning_rate=0.03,
        cov_ema_decay=0.95,
        damping=100.,
        layer_collection=layers,
        momentum=0.9,
        num_burnin_steps=0,
        placement_strategy="round_robin")
    _construct_layer_collection(layers, [logits], tf.trainable_variables())

    train_step = optimizer.minimize(loss)
    counter = optimizer.counter
    max_iterations = 50

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(sess=sess, coord=coord)
      for iteration in range(max_iterations):
        sess.run([loss, train_step])
        counter_ = sess.run(counter)
        self.assertEqual(counter_, iteration + 1.0) 
Author: tensorflow, Project: kfac, Lines: 37, Source: periodic_inv_cov_update_kfac_opt_test.py

Example 6: __init__

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def __init__(self, module):
    if not isinstance(module, snt.BatchFlatten):
      raise ValueError('Cannot wrap {} with a BatchFlattenWrapper.'.format(
          module))
    super(BatchFlattenWrapper, self).__init__(module, [-1]) 
Author: deepmind, Project: interval-bound-propagation, Lines: 7, Source: verifiable_wrapper.py

Example 7: _initial_symbolic_bounds

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def _initial_symbolic_bounds(lb, ub):
    """Returns symbolic bounds for the given interval bounds."""
    batch_size = tf.shape(lb)[0]
    input_shape = lb.shape[1:]
    zero = tf.zeros_like(lb)
    lb = snt.BatchFlatten()(lb)
    ub = snt.BatchFlatten()(ub)
    input_size = tf.shape(lb)[1]
    output_shape = tf.concat([[input_size], input_shape], axis=0)
    identity = tf.reshape(tf.eye(input_size), output_shape)
    identity = tf.expand_dims(identity, 0)
    identity = tf.tile(identity, [batch_size] + [1] * (len(input_shape) + 1))
    expr = LinearExpression(w=identity, b=zero,
                            lower=lb, upper=ub)
    return expr, expr 
Author: deepmind, Project: interval-bound-propagation, Lines: 17, Source: fastlin.py

Example 8: testVerifiableModelWrapperDNN

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def testVerifiableModelWrapperDNN(self):
    predictor = _build_model()
    # Input.
    z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
    z = tf.reshape(z, [1, 2, 2, 1])
    wrapper = ibp.VerifiableModelWrapper(predictor)
    wrapper(z)
    # Verify basic wrapping.
    self.assertEqual(predictor, wrapper.wrapped_network)
    self.assertEqual(3, wrapper.output_size)
    self.assertEqual((1, 3), tuple(wrapper.logits.shape.as_list()))
    self.assertEqual(z, wrapper.inputs)
    # Build another input and test reuse.
    z2 = tf.constant([1, 2, 3, 4], dtype=tf.float32)
    z2 = tf.reshape(z2, [1, 2, 2, 1])
    logits = wrapper(z2, reuse=True)
    self.assertEqual(z, wrapper.inputs)
    self.assertNotEqual(z2, wrapper.inputs)
    # Check that the verifiable modules are constructed.
    self.assertLen(wrapper.input_wrappers, 1)
    self.assertLen(wrapper.modules, 6)
    self.assertIsInstance(wrapper.modules[0].module, snt.Conv2D)
    self.assertEqual(wrapper.modules[1].module, tf.nn.relu)
    self.assertIsInstance(wrapper.modules[2].module, snt.BatchFlatten)
    self.assertIsInstance(wrapper.modules[3].module, snt.Linear)
    self.assertEqual(wrapper.modules[4].module, tf.nn.relu)
    self.assertIsInstance(wrapper.modules[5].module, snt.Linear)
    # It's a sequential network, so all nodes (including input) have fanout 1.
    self.assertEqual(wrapper.fanout_of(wrapper.input_wrappers[0]), 1)
    for module in wrapper.modules:
      self.assertEqual(wrapper.fanout_of(module), 1)
    # Check propagation.
    self._propagation_test(wrapper, z2, logits) 
Author: deepmind, Project: interval-bound-propagation, Lines: 35, Source: model_test.py
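_build_model is not shown in this snippet. Going purely by the assertions above (six modules in sequence: Conv2D, relu, BatchFlatten, Linear, relu, Linear, with three output logits), a hedged reconstruction could look like the sketch below; the channel count, kernel shape, and hidden size are guesses, and only the final output size of 3 is pinned down by the test:

import sonnet as snt
import tensorflow as tf

def _build_model():
  """Hypothetical predictor matching the asserted module sequence."""
  return snt.Sequential([
      snt.Conv2D(output_channels=4, kernel_shape=2),  # sizes are guesses
      tf.nn.relu,
      snt.BatchFlatten(),
      snt.Linear(10),  # hidden size is a guess
      tf.nn.relu,
      snt.Linear(3),   # three logits, per wrapper.output_size == 3
  ])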

Example 9: testPointlessReshape

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def testPointlessReshape(self):
    def _build(z0):
      z = snt.Linear(10)(z0)
      z = snt.BatchFlatten()(z)  # This is a no-op; no graph nodes created.
      return snt.Linear(2)(z)

    z = tf.constant([[1, 2, 3, 4]], dtype=tf.float32)
    wrapper = ibp.VerifiableModelWrapper(_build)
    logits = wrapper(z)
    # Expect the batch flatten to have been skipped.
    self.assertLen(wrapper.modules, 2)
    self.assertIsInstance(wrapper.modules[0], ibp.LinearFCWrapper)
    self.assertIsInstance(wrapper.modules[1], ibp.LinearFCWrapper)
    # Check propagation.
    self._propagation_test(wrapper, z, logits) 
Author: deepmind, Project: interval-bound-propagation, Lines: 17, Source: model_test.py

Example 10: load

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def load(config, **inputs):

    imgs, labels = inputs['train_img'], inputs['train_label']

    imgs = snt.BatchFlatten()(imgs)
    mlp = snt.nets.MLP([config.n_hidden, 10])
    logits = mlp(imgs)
    labels = tf.cast(labels, tf.int32)

    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))

    pred_class = tf.argmax(logits, -1)
    acc = tf.reduce_mean(tf.to_float(tf.equal(tf.to_int32(pred_class), labels)))

    # put here everything that you might want to use later
    # for example when you load the model in a jupyter notebook
    artefacts = {
        'mlp': mlp,
        'logits': logits,
        'loss': loss,
        'pred_class': pred_class,
        'accuracy': acc
    }

    # put here everything that you'd like to be reported every N training iterations
    # as tensorboard logs AND on the command line
    stats = {'crossentropy': loss, 'accuracy': acc}

    # loss will be minimized with respect to the model parameters
    return loss, stats, artefacts
Author: akosiorek, Project: forge, Lines: 32, Source: mnist_mlp.py

Example 11: _build

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def _build(self, inp):
        input_sizes = snt.nest.map(lambda inp_i: inp_i.get_shape()[1:], inp)
        self._merge_input_sizes(input_sizes)
        flatten = snt.BatchFlatten(preserve_dims=1)
        flat_inp = snt.nest.map(lambda inp_i: tf.to_float(flatten(inp_i)), inp)
        ret = util.concat_features(flat_inp)
        util.set_tensor_shapes(ret, self.output_size, add_batch_dims=1)
        return ret 
Author: google, Project: vae-seq, Lines: 10, Source: codec.py
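Note the preserve_dims=1 argument above: BatchFlatten(preserve_dims=k) leaves the first k dimensions untouched and flattens everything after them, so preserve_dims=1 is the usual batch-only flattening. A small sketch with illustrative shapes:

import sonnet as snt
import tensorflow as tf

# [batch, time, height, width, channels]
seq = tf.zeros([4, 10, 5, 5, 2])

# preserve_dims=1 (the default): flatten everything after the batch dim.
print(snt.BatchFlatten(preserve_dims=1)(seq).shape)  # (4, 500)

# preserve_dims=2: keep batch and time, flatten only the per-step features.
print(snt.BatchFlatten(preserve_dims=2)(seq).shape)  # (4, 10, 50)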

Example 12: mnist

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def mnist(layers,  # pylint: disable=invalid-name
          activation="sigmoid",
          batch_size=128,
          mode="train"):
  """Mnist classification with a multi-layer perceptron."""

  if activation == "sigmoid":
    activation_op = tf.sigmoid
  elif activation == "relu":
    activation_op = tf.nn.relu
  else:
    raise ValueError("{} activation not supported".format(activation))

  # Data.
  data = mnist_dataset.load_mnist()
  data = getattr(data, mode)
  images = tf.constant(data.images, dtype=tf.float32, name="MNIST_images")
  images = tf.reshape(images, [-1, 28, 28, 1])
  labels = tf.constant(data.labels, dtype=tf.int64, name="MNIST_labels")

  # Network.
  mlp = snt.nets.MLP(list(layers) + [10],
                     activation=activation_op,
                     initializers=_nn_initializers)
  network = snt.Sequential([snt.BatchFlatten(), mlp])

  def build():
    indices = tf.random_uniform([batch_size], 0, data.num_examples, tf.int64)
    batch_images = tf.gather(images, indices)
    batch_labels = tf.gather(labels, indices)
    output = network(batch_images)
    return _xent_loss(output, batch_labels)

  return build 
Author: deepmind, Project: learning-to-learn, Lines: 36, Source: problems.py

Example 13: __init__

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def __init__(self, init_with_true_state=False, model='2lstm', **unused_kwargs):

        self.placeholders = {'o': tf.placeholder('float32', [None, None, 24, 24, 3], 'observations'),
                     'a': tf.placeholder('float32', [None, None, 3], 'actions'),
                     's': tf.placeholder('float32', [None, None, 3], 'states'),
                     'keep_prob': tf.placeholder('float32')}
        self.pred_states = None
        self.init_with_true_state = init_with_true_state
        self.model = model

        # build models
        # <-- observation
        self.encoder = snt.Sequential([
            snt.nets.ConvNet2D([16, 32, 64], [[3, 3]], [2], [snt.SAME], activate_final=True, name='encoder/convnet'),
            snt.BatchFlatten(),
            lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
            snt.Linear(128, name='encoder/Linear'),
            tf.nn.relu,
        ])

        # <-- action
        if self.model == '2lstm':
            self.rnn1 = snt.LSTM(512)
            self.rnn2 = snt.LSTM(512)
        elif self.model == '2gru':
            self.rnn1 = snt.GRU(512)
            self.rnn2 = snt.GRU(512)
        elif self.model == 'ff':
            self.ff_lstm_replacement = snt.Sequential([
                snt.Linear(512),
                tf.nn.relu,
                snt.Linear(512),
                tf.nn.relu])

        self.belief_decoder = snt.Sequential([
            snt.Linear(256),
            tf.nn.relu,
            snt.Linear(256),
            tf.nn.relu,
            snt.Linear(3)
        ]) 
Author: tu-rbo, Project: differentiable-particle-filters, Lines: 43, Source: rnn.py

Example 14: connect_modules

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def connect_modules(self, means, stds, state_mins, state_maxs, state_step_sizes):

        # tracking_info_full = tf.tile(((self.placeholders['s'] - means['s']) / stds['s'])[:, :1, :], [1, tf.shape(self.placeholders['s'])[1], 1])
        tracking_info = tf.concat([((self.placeholders['s'] - means['s']) / stds['s'])[:, :1, :], tf.zeros_like(self.placeholders['s'][:,1:,:])], axis=1)
        flag = tf.concat([tf.ones_like(self.placeholders['s'][:,:1,:1]), tf.zeros_like(self.placeholders['s'][:,1:,:1])], axis=1)

        preproc_o = snt.BatchApply(self.encoder)((self.placeholders['o'] - means['o']) / stds['o'])
        # include tracking info
        if self.init_with_true_state:
            # preproc_o = tf.concat([preproc_o, tracking_info, flag], axis=2)
            preproc_o = tf.concat([preproc_o, tracking_info, flag], axis=2)
            # preproc_o = tf.concat([preproc_o, tracking_info_full], axis=2)

        preproc_a = snt.BatchApply(snt.BatchFlatten())(self.placeholders['a'] / stds['a'])
        preproc_ao = tf.concat([preproc_o, preproc_a], axis=-1)

        if self.model == '2lstm' or self.model == '2gru':
            lstm1_out, lstm1_final_state = tf.nn.dynamic_rnn(self.rnn1, preproc_ao, dtype=tf.float32)
            lstm2_out, lstm2_final_state = tf.nn.dynamic_rnn(self.rnn2, lstm1_out, dtype=tf.float32)
            belief_list = lstm2_out

        elif self.model == 'ff':
            belief_list = snt.BatchApply(self.ff_lstm_replacement)(preproc_ao)

        self.pred_states = snt.BatchApply(self.belief_decoder)(belief_list)
        self.pred_states = self.pred_states * stds['s'] + means['s'] 
Author: tu-rbo, Project: differentiable-particle-filters, Lines: 28, Source: rnn.py

Example 15: flat_reduce

# Required import: import sonnet [as alias]
# Or: from sonnet import BatchFlatten [as alias]
def flat_reduce(tensor, reduce_type='sum', final_reduce='mean'):
  """Flattens the tensor and reduces it."""

  def _reduce(tensor, how, *args):
    return getattr(tf, 'reduce_{}'.format(how))(tensor, *args)  # pylint:disable=not-callable

  tensor = snt.BatchFlatten()(tensor)
  tensor = _reduce(tensor, reduce_type, -1)
  if final_reduce is not None:
    tensor = _reduce(tensor, final_reduce)

  return tensor 
Author: akosiorek, Project: stacked_capsule_autoencoders, Lines: 14, Source: math_ops.py
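A quick usage sketch for flat_reduce, assuming a TensorFlow 1.x session; the tensor values are illustrative:

import tensorflow as tf

t = tf.ones([4, 3, 3])  # a batch of 4 examples, nine ones each

# Flattens to [4, 9], sums each row (giving 9.0), then averages over the batch.
result = flat_reduce(t, reduce_type='sum', final_reduce='mean')

with tf.Session() as sess:
  print(sess.run(result))  # 9.0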


Note: the sonnet.BatchFlatten method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community-contributed open-source projects; copyright of the source code remains with the original authors. When using or redistributing them, please follow the corresponding project's License. Do not repost without permission.