

Python numpy.zeros_like Method Code Examples

This article collects typical usage examples of the numpy.zeros_like method in Python. If you are unsure what numpy.zeros_like does or how to use it in practice, the curated code examples below should help. You can also explore further usage examples from the numpy package.


The following presents 15 code examples of the numpy.zeros_like method, sorted by popularity by default.
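As a quick illustration before the examples: numpy.zeros_like(a) returns a new array of zeros with the same shape and dtype as a. A minimal demo:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
z = np.zeros_like(x)     # zeros with the same shape and dtype as x
print(z.shape, z.dtype)  # (2, 3) float32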

Example 1: sgdmomentum

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def sgdmomentum(self, cost, params, constraints={}, lr=0.01, consider_constant=None, momentum=0.):
        """
        Stochastic gradient descent with momentum. Momentum has to be in [0, 1).
        """
        # Check that the momentum is a correct value
        assert 0 <= momentum < 1

        lr = theano.shared(np.float32(lr).astype(floatX))
        momentum = theano.shared(np.float32(momentum).astype(floatX))

        # pass consider_constant through, as the sibling optimizers below do
        gradients = self.get_gradients(cost, params, consider_constant)
        velocities = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, velocity in zip(params, gradients, velocities):
            new_velocity = momentum * velocity - lr * gradient
            updates.append((velocity, new_velocity))
            new_p = param + new_velocity
            # apply constraints
            if param in constraints:
                c = constraints[param]
                new_p = c(new_p)
            updates.append((param, new_p))
        return updates 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 26, Source: optimization.py

Example 2: adagrad

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def adagrad(self, cost, params, lr=1.0, epsilon=1e-6, consider_constant=None):
        """
        Adagrad. Based on http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
        """
        lr = theano.shared(np.float32(lr).astype(floatX))
        epsilon = theano.shared(np.float32(epsilon).astype(floatX))

        gradients = self.get_gradients(cost, params, consider_constant)
        gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, gsum in zip(params, gradients, gsums):
            new_gsum = gsum + gradient ** 2.
            updates.append((gsum, new_gsum))
            # divide by the updated accumulator so the current gradient is included
            updates.append((param, param - lr * gradient / T.sqrt(new_gsum + epsilon)))
        return updates 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 18, Source: optimization.py

Example 3: adadelta

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def adadelta(self, cost, params, rho=0.95, epsilon=1e-6, consider_constant=None):
        """
        Adadelta. Based on:
        http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
        """
        rho = theano.shared(np.float32(rho).astype(floatX))
        epsilon = theano.shared(np.float32(epsilon).astype(floatX))

        gradients = self.get_gradients(cost, params, consider_constant)
        accu_gradients = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
        accu_deltas = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, accu_gradient, accu_delta in zip(params, gradients, accu_gradients, accu_deltas):
            new_accu_gradient = rho * accu_gradient + (1. - rho) * gradient ** 2.
            delta_x = - T.sqrt((accu_delta + epsilon) / (new_accu_gradient + epsilon)) * gradient
            new_accu_delta = rho * accu_delta + (1. - rho) * delta_x ** 2.
            updates.append((accu_gradient, new_accu_gradient))
            updates.append((accu_delta, new_accu_delta))
            updates.append((param, param + delta_x))
        return updates 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 23, Source: optimization.py

Example 4: rmsprop

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def rmsprop(self, cost, params, lr=0.001, rho=0.9, eps=1e-6, consider_constant=None):
        """
        RMSProp.
        """
        lr = theano.shared(np.float32(lr).astype(floatX))

        gradients = self.get_gradients(cost, params, consider_constant)
        accumulators = [theano.shared(np.zeros_like(p.get_value()).astype(np.float32)) for p in params]

        updates = []

        for param, gradient, accumulator in zip(params, gradients, accumulators):
            new_accumulator = rho * accumulator + (1 - rho) * gradient ** 2
            updates.append((accumulator, new_accumulator))

            new_param = param - lr * gradient / T.sqrt(new_accumulator + eps)
            updates.append((param, new_param))

        return updates 
Author: lingluodlut, Project: Att-ChemdNER, Lines: 21, Source: optimization.py
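Examples 1 through 4 all use np.zeros_like for the same purpose: allocating per-parameter optimizer state (velocity vectors, squared-gradient accumulators) whose shape and dtype match each parameter. A minimal plain-NumPy sketch of the momentum rule from Example 1, with hypothetical values and no Theano:

import numpy as np

param = np.array([1.0, -2.0, 0.5], dtype=np.float32)  # hypothetical parameter vector
grad = np.array([0.1, 0.3, -0.2], dtype=np.float32)   # hypothetical gradient
velocity = np.zeros_like(param)                       # state buffer matching param

lr, momentum = 0.01, 0.9
velocity = momentum * velocity - lr * grad  # v <- momentum * v - lr * g
param = param + velocity                    # p <- p + v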

Example 5: data_augmentation

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def data_augmentation(self, x_train):
        _, c, h, w = x_train.shape
        pad_h = h + 2 * self.pad_size
        pad_w = w + 2 * self.pad_size
        aug_data = np.zeros_like(x_train)
        for i, x in enumerate(x_train):
            pad_img = np.zeros((c, pad_h, pad_w))
            pad_img[:, self.pad_size:h+self.pad_size, self.pad_size:w+self.pad_size] = x

            # Randomly crop and horizontal flip the image
            top = np.random.randint(0, pad_h - h + 1)
            left = np.random.randint(0, pad_w - w + 1)
            bottom = top + h
            right = left + w
            if np.random.randint(0, 2):
                pad_img = pad_img[:, :, ::-1]

            aug_data[i] = pad_img[:, top:bottom, left:right]

        return aug_data 
Author: sg-nm, Project: cgp-cnn, Lines: 22, Source: cnn_train.py
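A hedged usage sketch for the method above, assuming it is available as a plain function and that pad_size=4 (a common choice for CIFAR-style 32x32 inputs; both values are illustrative):

import numpy as np
from types import SimpleNamespace

trainer = SimpleNamespace(pad_size=4)                       # hypothetical stand-in for the real trainer object
x_train = np.random.rand(8, 3, 32, 32).astype(np.float32)  # dummy NCHW batch
aug = data_augmentation(trainer, x_train)
print(aug.shape)  # (8, 3, 32, 32): shape preserved, contents randomly cropped/flipped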

Example 6: train_lr_rfeinman

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 27, Source: util.py

Example 7: compute_roc_rfeinman

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 25, Source: util.py
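Examples 6 and 7 share the same labeling idiom: np.zeros_like and np.ones_like build the 0/1 label vector directly from the negative and positive score arrays, guaranteeing the labels match the scores in length. A self-contained sketch with synthetic scores (adding the sklearn.metrics imports the snippets assume):

import numpy as np
from sklearn.metrics import roc_curve, auc

rng = np.random.RandomState(0)
probs_neg = rng.normal(0.3, 0.1, size=100)  # synthetic scores for negatives
probs_pos = rng.normal(0.7, 0.1, size=100)  # synthetic scores for positives

probs = np.concatenate((probs_neg, probs_pos))
labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
fpr, tpr, _ = roc_curve(labels, probs)
print('AUC:', auc(fpr, tpr))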

Example 8: numerical_gradient

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def numerical_gradient(f, x):
    h = 1e-4
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        # compute f(x+h)
        x[idx] = tmp_val + h
        fxh1 = f(x)
        # compute f(x-h)
        x[idx] = tmp_val - h
        fxh2 = f(x)

        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val

    return grad


# gradient descent 
Author: wdxtub, Project: deep-learning-note, Lines: 22, Source: 7_gradient.py
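A quick sanity check for the routine above: for f(x) = sum(x**2) the analytic gradient is 2x, so the central-difference estimate should match it closely.

import numpy as np

x = np.array([3.0, 4.0])
grad = numerical_gradient(lambda v: np.sum(v ** 2), x)
print(grad)  # approximately [6. 8.], i.e. 2 * x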

Example 9: _numerical_gradient_1d

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def _numerical_gradient_1d(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value

    return grad 
Author: wdxtub, Project: deep-learning-note, Lines: 18, Source: gradient.py

Example 10: numerical_gradient

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def numerical_gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()

    return grad 
Author: wdxtub, Project: deep-learning-note, Lines: 21, Source: gradient.py

Example 11: update

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def update(self, index, weight, grad, state):
        self._update_count(index)
        wd = self._get_wd(index)
        lr = self._get_lr(index)
        num_rows = weight.shape[0]

        dn, n = state
        for row in range(num_rows):
            all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
            if all_zeros and self.lazy_update:
                continue
            grad[row] = grad[row] * self.rescale_grad
            if self.clip_gradient is not None:
                mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])

            # update dn and n
            dn[row] += grad[row] - (mx.nd.sqrt(n[row] + grad[row] * grad[row]) - mx.nd.sqrt(n[row])) * weight[row] / lr
            n[row] += grad[row] * grad[row]

            # update weight
            weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \
                          ((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1) 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 24, Source: test_optimizer.py

Example 12: conjugate_gradient

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def conjugate_gradient(f_Ax, b, cg_iters=10, residual_tol=1e-10):
  p = b.copy()
  r = b.copy()
  x = np.zeros_like(b)
  rdotr = r.dot(r)
  for i in range(cg_iters):  # xrange in the Python 2 original
    z = f_Ax(p)
    v = rdotr / p.dot(z)
    x += v * p
    r -= v * z
    newrdotr = r.dot(r)
    mu = newrdotr / rdotr
    p = r + mu * p
    rdotr = newrdotr
    if rdotr < residual_tol:
      break
  return x 
Author: ringringyi, Project: DOTA_models, Lines: 19, Source: trust_region.py
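A usage sketch: conjugate_gradient solves A x = b for a symmetric positive-definite A that is supplied only through the matrix-vector product f_Ax. With a small hypothetical SPD system:

import numpy as np

A = np.array([[4.0, 1.0],
              [1.0, 3.0]])  # symmetric positive-definite
b = np.array([1.0, 2.0])

x = conjugate_gradient(lambda v: A.dot(v), b)
print(x)                      # close to np.linalg.solve(A, b)
print(np.linalg.solve(A, b))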

Example 13: count_super

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def count_super(p, m, counters, preds, labels, label_to_ch):
    
    for l in np.unique(labels):
        preds_l = preds[labels == l]
        
        # in -> known
        if label_to_ch[l]:
            acc = np.zeros_like(preds_l, dtype=bool)
            for c in label_to_ch[l]:
                if p == 0: counters['data'][m][c] += preds_l.shape[0]
                acc |= (preds_l == c)
            acc_sum = acc.sum()
            for c in label_to_ch[l]:
                counters['acc'][p,m][c] += acc_sum
        
        # out -> novel
        else:
            if p == 0: counters['data'][m][-1] += preds_l.shape[0]
            acc_sum = (preds_l < 0).sum()
            counters['acc'][p,m][-1] += acc_sum 
Author: kibok90, Project: cvpr2018-hnd, Lines: 22, Source: test.py

Example 14: evaluate

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
        all_loss = []
        for i in range(10):  # iterate over each station. (sample_ind, timestep, stationID, features)
            #batch_placeholders = np.zeros_like(data_labels[:,:,i,:])
            val_loss = self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:], data_ids[:,:,i], data_time],
                                y=[data_labels[:,:,i,:]], verbose=False)

            all_loss.append(val_loss)

            if each_station_display:
                print('\tFor station 9000{}, val loss: {}'.format(i+1, val_loss))
        
        self.current_mean_val_loss = np.mean(all_loss)
        print('Mean val loss:', self.current_mean_val_loss)

        self.val_loss_list.append(self.current_mean_val_loss) 
Author: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 18, Source: seq2seq_class.py

Example 15: evaluate

# Required module import: import numpy [as alias]
# Or alternatively: from numpy import zeros_like [as alias]
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
        all_loss = []
        for i in range(10):  # iterate over each station. (sample_ind, timestep, stationID, features)
            #batch_placeholders = np.zeros_like(data_labels[:,:,i,:])
            val_loss = self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:], data_ids[:,:,i], data_time],
                                y=[data_labels[:,:,i,:]], verbose=False)

            all_loss.append(val_loss)

            if each_station_display:
                print('\tFor station 9000{}, val MLE loss: {}'.format(i+1, val_loss))
        
        self.current_mean_val_loss = np.mean(all_loss)
        print('Mean val MLE loss:', self.current_mean_val_loss)

        self.val_loss_list.append(self.current_mean_val_loss) 
Author: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 18, Source: competition_model_class.py


Note: the numpy.zeros_like examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects, and copyright remains with the original authors; refer to each project's license before redistributing or reusing the code, and do not reproduce this article without permission.