

Python numpy.zeros Function Code Examples

This article collects typical usage examples of the zeros function from the Python module minpy.numpy. If you are unsure what zeros does, how to call it, or where it is used in practice, the curated examples below should help.


Below are 15 code examples of the zeros function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

Example 1: main

def main(args):
    # Create model.
    model = RNNNet(args)
    for k, v in model.param_configs.items():
        model.params[k] = np.zeros(v['shape'])

    data = np.zeros((args.batch_size, args.input_size)) # Data of only one time step.
    label = np.zeros((args.batch_size,), dtype=np.int)

    for l in range(args.num_loops):
        if l == num_cold:  # num_cold is a module-level warm-up count in the source file
            start = time.time()
        def loss_func(*params):
            f = model.forward(data, 'train')
            return model.loss(f, label)
        if args.only_forward:
            loss = loss_func()
            loss.asnumpy()
        else:
            param_arrays = list(model.params.values())
            param_keys = list(model.params.keys())
            grad_and_loss_func = core.grad_and_loss(
                loss_func, argnum=range(len(param_arrays)))
            grad_arrays, loss = grad_and_loss_func(*param_arrays)
    dur = time.time() - start
    print('Per Loop Time: %.6f' % (dur / (args.num_loops - num_cold)))
Developer: HrWangChengdu, Project: minpy, Lines: 26, Source: rnn_minpy_cpu.py

Example 2: lstm_temporal

def lstm_temporal(x, h0, Wx, Wh, b):
    """
    Forward pass for an LSTM over an entire sequence of data. We assume an input
    sequence composed of T vectors, each of dimension D. The LSTM uses a hidden
    size of H, and we work over a minibatch containing N sequences. After running
    the LSTM forward, we return the hidden states for all timesteps.

    Note that the initial hidden state is passed as input, while the initial
    cell state is set to zero. Also note that the cell state is not returned;
    it is an internal variable of the LSTM and is not accessed from outside.

    Inputs:
    - x: Input data of shape (N, T, D)
    - h0: Initial hidden state of shape (N, H)
    - Wx: Weights for input-to-hidden connections, of shape (D, 4H)
    - Wh: Weights for hidden-to-hidden connections, of shape (H, 4H)
    - b: Biases of shape (4H,)

    Returns:
    - h: Hidden states for all timesteps of all sequences, of shape (N, T, H)
    """
    N, T, D = x.shape
    _, H = h0.shape
    c = np.zeros([N, 0, H])
    h = np.zeros([N, 0, H])
    for t in range(T):
        prev_h = h[:, t - 1, :] if t > 0 else h0
        prev_c = c[:, t - 1, :] if t > 0 else np.zeros((N, H))
        h_step, c_step = lstm_step(x[:, t, :], prev_h, prev_c, Wx, Wh, b)
        h = np.append(h, h_step.reshape(N, 1, H), axis=1)
        c = np.append(c, c_step.reshape(N, 1, H), axis=1)
    return h
Developer: ZihengJiang, Project: minpy, Lines: 33, Source: layers.py
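This example (and Example 6 below) depends on an lstm_step helper that the excerpt does not show. The following is a minimal sketch of what such a step function conventionally looks like, assuming the standard LSTM gate equations and the (D, 4H)/(H, 4H) weight layout from the docstring above; the actual lstm_step in layers.py may differ in details.

def lstm_step(x, prev_h, prev_c, Wx, Wh, b):
    # One LSTM timestep (sketch). Shapes: x (N, D), prev_h and prev_c (N, H),
    # Wx (D, 4H), Wh (H, 4H), b (4H,).
    def sigmoid(z):
        return 0.5 * (np.tanh(z / 2) + 1)
    H = prev_h.shape[1]
    a = np.dot(x, Wx) + np.dot(prev_h, Wh) + b  # gate pre-activations, (N, 4H)
    i = sigmoid(a[:, 0:H])          # input gate
    f = sigmoid(a[:, H:2 * H])      # forget gate
    o = sigmoid(a[:, 2 * H:3 * H])  # output gate
    g = np.tanh(a[:, 3 * H:4 * H])  # candidate cell update
    next_c = f * prev_c + i * g     # new cell state
    next_h = o * np.tanh(next_c)    # new hidden state
    return next_h, next_c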

Example 3: __init__

    def __init__(self,
                 input_dim=3 * 32 * 32,
                 hidden_dim=100,
                 num_classes=10,
                 weight_scale=1e-3,
                 reg=0.0,
                 conv_mode='lazy',
                 dtype=py_np.float64):
        """
        Initialize a new network.

        Inputs:
        - input_dim: An integer giving the size of the input
        - hidden_dim: An integer giving the size of the hidden layer
        - num_classes: An integer giving the number of classes to classify
        - weight_scale: Scalar giving the standard deviation for random
          initialization of the weights.
        - reg: Scalar giving L2 regularization strength.
        """
        super(TwoLayerNet, self).__init__(conv_mode)
        self.params = {}
        self.reg = reg

        self.params['W1'] = random.randn(input_dim, hidden_dim) * weight_scale
        self.params['b1'] = np.zeros((hidden_dim))
        self.params['W2'] = random.randn(hidden_dim, num_classes) * weight_scale
        self.params['b2'] = np.zeros((num_classes))
Developer: colinsongf, Project: minpy, Lines: 28, Source: fc_net_minpy.py
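A quick usage sketch of the constructor above (hypothetical hyperparameter values; the rest of the class lives in fc_net_minpy.py):

model = TwoLayerNet(input_dim=3 * 32 * 32, hidden_dim=100,
                    num_classes=10, weight_scale=1e-3, reg=1e-2)
print(model.params['W1'].shape)  # (3072, 100)
print(model.params['b1'].shape)  # (100,)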

Example 4: main

def main(args):
    # Create model.
    model = TwoLayerNet(args)
    for k, v in model.param_configs.items():
        model.params[k] = np.zeros(v['shape'])

    img = np.zeros((args.batch_size, 784))
    label = np.zeros((args.batch_size,))

    start = time.time()
    for l in range(num_loops):  # num_loops is a module-level constant in the source file
        def loss_func(*params):
            f = model.forward(img, 'train')
            return model.loss(f, label)
        if args.only_forward:
            loss = loss_func()
            loss.asnumpy()  # force evaluation of the lazy result
        else:
            param_arrays = list(model.params.values())
            param_keys = list(model.params.keys())
            grad_and_loss_func = minpy.core.grad_and_loss(
                loss_func, argnum=range(len(param_arrays)))
            grad_arrays, loss = grad_and_loss_func(*param_arrays)
            for g in grad_arrays:
                g.get_data(minpy.array_variants.ArrayType.MXNET).wait_to_read()
    dur = time.time() - start
    print('Per Loop Time: %.6f' % (dur / num_loops))
Developer: ZihengJiang, Project: minpy, Lines: 27, Source: mlp_minpy_gpu.py

Example 5: gaussian_cluster_generator

def gaussian_cluster_generator(num_samples=10000, num_features=500, num_classes=5):
    mu = np.random.rand(num_classes, num_features)
    sigma = np.ones((num_classes, num_features)) * 0.1
    num_cls_samples = int(num_samples / num_classes)
    x = np.zeros((num_samples, num_features))
    y = np.zeros((num_samples, num_classes))
    for i in range(num_classes):
        cls_samples = np.random.normal(mu[i, :], sigma[i, :], (num_cls_samples, num_features))
        x[i * num_cls_samples:(i + 1) * num_cls_samples] = cls_samples
        y[i * num_cls_samples:(i + 1) * num_cls_samples, i] = 1
    return x, y
Developer: lryta, Project: minpy, Lines: 11, Source: test_policy.py
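A usage sketch, assuming minpy.numpy is imported as np as in the other examples:

x, y = gaussian_cluster_generator(num_samples=1000, num_features=50, num_classes=5)
print(x.shape)  # (1000, 50): feature matrix, 200 samples per Gaussian cluster
print(y.shape)  # (1000, 5): one-hot class labels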

Example 6: forward

    def forward(self, X, mode):
        seq_len = X.shape[1]
        batch_size = X.shape[0]
        hidden_size = self.params['Wh'].shape[0]
        h = np.zeros((batch_size, hidden_size))
        c = np.zeros((batch_size, hidden_size))
        for t in range(seq_len):
            h, c = layers.lstm_step(X[:, t, :], h, c, self.params['Wx'],
                                    self.params['Wh'], self.params['b'])
        y = layers.affine(h, self.params['Wa'], self.params['ba'])
        return y
Developer: lryta, Project: minpy, Lines: 11, Source: lstm.py

Example 7: softmax_cross_entropy

def softmax_cross_entropy(prob, label):
    """
    Computes the cross entropy for softmax activation.

    Inputs:
    - prob: Probabilities, of shape (N, C), where prob[i, j] is the probability
      of the jth class for the ith input.
    - label: Either of the following:
      - One-hot encoding of the labels, of shape (N, C)
      - Label indices of shape (N,), where each label[i] is the label of the
        ith example (0 <= label[i] < C)

    Returns:
    - cross_entropy: Scalar giving the average cross entropy
    """

    N = prob.shape[0]
    C = prob.shape[1]
    if len(label.shape) == 1:
        # Convert to one-hot encoding.
        onehot_label = np.zeros([N, C])
        np.onehot_encode(label, onehot_label)
    else:
        onehot_label = label
    return -np.sum(np.log(prob) * onehot_label) / N
Developer: HrWangChengdu, Project: minpy, Lines: 25, Source: layers.py
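np.onehot_encode is minpy-specific. For reference, here is an equivalent computation in plain NumPy, using fancy indexing for the one-hot conversion (a sketch, not part of the original layers.py):

import numpy

def softmax_cross_entropy_np(prob, label):
    N, C = prob.shape
    if label.ndim == 1:
        onehot = numpy.zeros((N, C))
        onehot[numpy.arange(N), label] = 1  # index-based one-hot encoding
    else:
        onehot = label
    return -numpy.sum(numpy.log(prob) * onehot) / N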

Example 8: rnn_temporal

def rnn_temporal(x, h0, Wx, Wh, b):
    """
    Run a vanilla RNN forward on an entire sequence of data. We assume an input
    sequence composed of T vectors, each of dimension D. The RNN uses a hidden
    size of H, and we work over a minibatch containing N sequences. After running
    the RNN forward, we return the hidden states for all timesteps.

    Inputs:
    - x: Input data for the entire timeseries, of shape (N, T, D).
    - h0: Initial hidden state, of shape (N, H)
    - Wx: Weight matrix for input-to-hidden connections, of shape (D, H)
    - Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)
    - b: Biases of shape (H,)

    Returns:
    - h: Hidden states for the entire timeseries, of shape (N, T, H).
    """
    N, T, _ = x.shape
    H = h0.shape[1]
    h = np.zeros([N, 0, H])
    for t in range(T):
        h_step = rnn_step(x[:, t, :], h0 if t == 0 else h[:, t - 1, :], Wx, Wh,
                          b).reshape(N, 1, H)
        h = np.append(h, h_step, axis=1)
    return h
Developer: HrWangChengdu, Project: minpy, Lines: 25, Source: layers.py
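As with the LSTM examples, rnn_step is defined elsewhere in layers.py. Under the usual tanh formulation of a vanilla RNN, a minimal sketch would be (the actual implementation may differ):

def rnn_step(x, prev_h, Wx, Wh, b):
    # One vanilla RNN timestep (sketch).
    # Shapes: x (N, D), prev_h (N, H), Wx (D, H), Wh (H, H), b (H,).
    return np.tanh(np.dot(x, Wx) + np.dot(prev_h, Wh) + b)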

Example 9: forward

    def forward(self, X, mode):
        h = np.zeros(self.hshape)  # initialize the hidden state
        for t in range(self.num_unroll_steps):
            h = layers.rnn_step(X, h, self.params['Wx'],
                                self.params['Wh'], self.params['b'])
        y = layers.affine(h, self.params['Wa'], self.params['ba'])
        return y
Developer: HrWangChengdu, Project: minpy, Lines: 7, Source: rnn_minpy_cpu.py

Example 10: softmax_loss

def softmax_loss(x, y):
    """
    Computes the loss and gradient for softmax classification.

    Inputs:
    - x: Input data, of shape (N, C) where x[i, j] is the score for the jth class
      for the ith input.
    - y: Either of the following:
      - One-hot encoding of the labels, of shape (N, C)
      - Label indices of shape (N,), where each y[i] is the label of the
        ith example (0 <= y[i] < C)

    Returns:
    - loss: Scalar giving the loss
    """
    N = x.shape[0]
    C = x.shape[1]
    if len(y.shape) == 1:
        # Convert to one-hot encoding.
        onehot_y = np.zeros([N, C])
        np.onehot_encode(y, onehot_y)
    else:
        onehot_y = y
    # Shift the scores for numerical stability; by the log-sum-exp identity
    # the loss below equals the standard softmax cross entropy.
    shifted = x - np.max(x, axis=1, keepdims=True)
    loss = -np.sum(shifted * onehot_y) / N
    loss += np.sum(np.log(np.sum(np.exp(shifted), axis=1, keepdims=True))) / N
    return loss
Developer: ZihengJiang, Project: minpy, Lines: 27, Source: layers.py
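The shift-and-log-sum-exp form above is algebraically identical to the textbook cross entropy -sum(onehot_y * log(softmax(x))) / N. A small plain-NumPy check of that identity (softmax_loss_direct is a hypothetical helper, not from layers.py):

import numpy

def softmax_loss_direct(x, onehot_y):
    # Textbook form: average negative log-probability of the true class.
    e = numpy.exp(x - numpy.max(x, axis=1, keepdims=True))
    p = e / numpy.sum(e, axis=1, keepdims=True)
    return -numpy.sum(onehot_y * numpy.log(p)) / x.shape[0]

x = numpy.random.randn(4, 3)
onehot_y = numpy.eye(3)[numpy.array([0, 2, 1, 1])]
shifted = x - numpy.max(x, axis=1, keepdims=True)
loss = (-numpy.sum(shifted * onehot_y)
        + numpy.sum(numpy.log(numpy.sum(numpy.exp(shifted), axis=1, keepdims=True)))) / 4
assert numpy.allclose(loss, softmax_loss_direct(x, onehot_y))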

Example 11: test_lr_grad

def test_lr_grad():
    def sigmoid(x):
        return 0.5 * (np.tanh(x / 2) + 1)

    def predict(weights, inputs):
        return sigmoid(np.dot(inputs, weights))

    def training_loss(inputs):  # weights is captured from the enclosing scope
        preds = predict(weights, inputs)
        label_probabilities = preds * targets + (1 - preds) * (1 - targets)
        l = -np.sum(np.log(label_probabilities))
        return l

    def training_accuracy(weights, inputs):
        preds = predict(weights, inputs)
        error = np.count_nonzero(np.argmax(preds, axis=1) - np.argmax(targets, axis=1))
        return (256 - error) * 100 / 256.0

    wshape = (500, 250)
    weights = random.rand(*wshape) - 0.5

    xshape = (256, 500)
    tshape = (256, 250)
    inputs = random.rand(*xshape) - 0.5
    targets = np.zeros(tshape)
    truth = random.randint(0, 250, 256)
    targets[np.arange(256), truth] = 1

    gradient_checker.quick_grad_check(training_loss, inputs)
Developer: ZihengJiang, Project: minpy, Lines: 29, Source: test_lr_grad.py

Example 12: test_context

def test_context():
    set_context(gpu(1)) # set the global context as gpu(1)
    
    def sigmoid(x):
        return 0.5 * (np.tanh(x / 2) + 1)
    
    def predict(weights, inputs):
        return sigmoid(np.dot(inputs, weights))
    
    def training_loss(weights, inputs):
        preds = predict(weights, inputs)
        label_probabilities = preds * targets + (1 - preds) * (1 - targets)
        l = -np.sum(np.log(label_probabilities))
        return l
    
    def training_accuracy(weights, inputs):
        preds = predict(weights, inputs)
        error = np.count_nonzero(np.argmax(preds, axis=1) - np.argmax(targets, axis=1))
        return (256 - error) * 100 / 256.0
    
    with gpu(0):
        xshape = (256, 500)
        wshape = (500, 250)
        tshape = (256, 250)
        inputs = random.rand(*xshape) - 0.5
        targets = np.zeros(tshape)
        truth = random.randint(0, 250, 256)
        targets[np.arange(256), truth] = 1
        weights = random.rand(*wshape) - 0.5
    
        training_gradient_fun = grad(training_loss)
    
        for i in range(20):
            print('Training accuracy #{}: {}%'.format(i, training_accuracy(weights, inputs)))
            gr = training_gradient_fun(weights, inputs)
            weights -= gr * 0.01
        print("\nff and bp on {0}".format(weights.context))
    
    print("\nexecute on cpu")
    with cpu():
        x_cpu = random.rand(32, 64) - 0.5
        y_cpu = random.rand(64, 32) - 0.5
        z_cpu = np.dot(x_cpu, y_cpu)
        print('z_cpu.context = {0}'.format(z_cpu.context))
    
    print("\nexecute on gpu(0)")
    with gpu(0):
        x_gpu0 = random.rand(32, 64) - 0.5
        y_gpu0 = random.rand(64, 32) - 0.5
        z_gpu0 = np.dot(x_gpu0, y_gpu0)
        z_gpu0.asnumpy()
        print('z_gpu0.context = {0}'.format(z_gpu0.context))
    
    print("\n[use global context] execute on gpu(1)")
    x_gpu1 = random.rand(32, 64) - 0.5
    y_gpu1 = random.rand(64, 32) - 0.5
    z_gpu1 = np.dot(x_gpu1, y_gpu1)
    z_gpu1.asnumpy()
    print('z_gpu1.context = {0}'.format(z_gpu1.context))
Developer: HrWangChengdu, Project: minpy, Lines: 59, Source: test_context.py

Example 13: forward

    def forward(self, X, mode):
        out = self.conv(X=X, **self.params)
        out = layers.affine(out, self.params['w1'], self.params['b1'])
        out = layers.relu(out)
        out = layers.affine(out, self.params['w2'], self.params['b2'])
        # This verifies whether symbols can be reused.
        trash = self.conv(X=np.zeros(X.shape), **self.params)
        return out
Developer: lryta, Project: minpy, Lines: 8, Source: test_cnn.py

Example 14: set_param

    def set_param(self):
        self.params = {}

        c_cnt, height, width = self.input_dim
        f_cnt = self.num_filters
        f_h, f_w = self.filter_size, self.filter_size

        self.params['conv1_weight'] = random.randn(f_cnt, c_cnt, f_h,
                                                   f_w) * self.weight_scale
        self.params['conv1_bias'] = np.zeros(f_cnt)

        #TODO(Haoran): all of these dimension calculations
        #should be replaced by querying symbol.arg_list
        conv_stride = 1
        conv_pad = (f_h - 1) // 2  # integer division: "same" padding for odd filter sizes

        Hc = 1 + (height + 2 * conv_pad - f_h) // conv_stride
        Wc = 1 + (width + 2 * conv_pad - f_w) // conv_stride

        pool_height, pool_width = 2, 2
        pool_stride = 2

        Hp = (Hc - pool_height) // pool_stride + 1
        Wp = (Wc - pool_width) // pool_stride + 1

        # Weights have to be transposed to fit mxnet's symbol.
        self.params['fc1_weight'] = np.transpose(random.randn(
            5408, self.hidden_dim) * self.weight_scale)
        self.params['fc1_bias'] = np.zeros((self.hidden_dim))

        # Weights have to be transposed to fit mxnet's symbol.
        self.params['fc2_weight'] = np.transpose(random.randn(
            self.hidden_dim, self.num_classes) * self.weight_scale)
        self.params['fc2_bias'] = np.zeros((self.num_classes))

        #TODO(Haoran): move following into parent structured model class
        self.param_keys = self.params.keys()

        # Build key's index in loss func's arglist
        self.key_args_index = {}
        for i, key in enumerate(self.param_keys):
            # data and targets are the first two elements in the arglist
            self.key_args_index[key] = self.data_target_cnt + i
Developer: colinsongf, Project: minpy, Lines: 43, Source: cnn_minpy.py
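The convolution/pooling dimension arithmetic in set_param can be factored into a small helper. A sketch, assuming Python 3 integer division and the same stride, pad, and pooling conventions as the code above (conv_pool_out_hw is a hypothetical name, not part of cnn_minpy.py):

def conv_pool_out_hw(height, width, f_h, f_w,
                     conv_stride=1, pool=2, pool_stride=2):
    conv_pad = (f_h - 1) // 2  # "same"-style padding for odd filter sizes
    Hc = 1 + (height + 2 * conv_pad - f_h) // conv_stride  # conv output height
    Wc = 1 + (width + 2 * conv_pad - f_w) // conv_stride   # conv output width
    Hp = (Hc - pool) // pool_stride + 1                    # pooled height
    Wp = (Wc - pool) // pool_stride + 1                    # pooled width
    return Hp, Wp  # multiply by f_cnt to get the flattened fc1 input size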

Example 15: softmax_cross_entropy

def softmax_cross_entropy(prob, label):
    N = prob.shape[0]
    C = prob.shape[1]
    if len(label.shape) == 1:
        # Convert to one-hot encoding.
        onehot_label = np.zeros([N, C])
        np.onehot_encode(label, onehot_label)
    else:
        onehot_label = label
    return -np.sum(np.log(prob) * onehot_label) / N
Developer: ZihengJiang, Project: minpy, Lines: 10, Source: layers.py


Note: The minpy.numpy.zeros examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.