

Python numpy.zeros_like Method Code Examples

This article collects typical usage examples of the numpy.zeros_like method in Python. If you are wondering what numpy.zeros_like does, how to call it, or what real-world uses look like, the curated code samples below should help. You can also explore further usage examples from the numpy module itself.


Below are 15 code examples of numpy.zeros_like, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
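Before working through the examples, it helps to recall what numpy.zeros_like itself does: it allocates a new array of zeros with the same shape and dtype as its argument, and an optional dtype parameter overrides the element type. A minimal sketch:

import numpy as np

x = np.arange(6, dtype=np.float32).reshape(2, 3)
z = np.zeros_like(x)                   # shape (2, 3), dtype float32, all zeros
zi = np.zeros_like(x, dtype=np.int64)  # shape is kept, dtype overridden
print(z.shape, z.dtype)                # (2, 3) float32
print(zi.dtype)                        # int64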

Example 1: sgdmomentum

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
# (this example also relies on theano; floatX is typically theano.config.floatX)
def sgdmomentum(self, cost, params, constraints={}, lr=0.01, consider_constant=None, momentum=0.):
        """
        Stochastic gradient descent with momentum. Momentum has to be in [0, 1).
        """
        # Check that the momentum is a correct value
        assert 0 <= momentum < 1

        lr = theano.shared(np.float32(lr).astype(floatX))
        momentum = theano.shared(np.float32(momentum).astype(floatX))

        gradients = self.get_gradients(cost, params)
        velocities = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, velocity in zip(params, gradients, velocities):
            new_velocity = momentum * velocity - lr * gradient
            updates.append((velocity, new_velocity))
            new_p = param + new_velocity
            # apply constraints
            if param in constraints:
                c = constraints[param]
                new_p = c(new_p)
            updates.append((param, new_p))
        return updates 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 26, Source file: optimization.py
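The returned updates list is meant to be passed to theano.function; the same pattern applies to the adagrad, adadelta, and rmsprop methods below. Here is a minimal, hedged sketch of the momentum scheme on a toy quadratic cost (the single shared parameter w and the hyperparameter values are illustrative assumptions, not part of the original class):

import numpy as np
import theano
import theano.tensor as T

floatX = theano.config.floatX

w = theano.shared(np.ones(3).astype(floatX), name='w')  # toy parameter
cost = T.sum(w ** 2)                                     # toy quadratic cost

lr, momentum = 0.1, 0.9
velocity = theano.shared(np.zeros_like(w.get_value()).astype(floatX))
grad = T.grad(cost, w)
new_velocity = momentum * velocity - lr * grad
updates = [(velocity, new_velocity), (w, w + new_velocity)]

step = theano.function([], cost, updates=updates)
for _ in range(20):
    step()  # each call applies one momentum step; the cost shrinks toward 0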

Example 2: adagrad

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
# (this example also relies on theano; floatX is typically theano.config.floatX)
def adagrad(self, cost, params, lr=1.0, epsilon=1e-6, consider_constant=None):
        """
        Adagrad. Based on http://www.ark.cs.cmu.edu/cdyer/adagrad.pdf
        """
        lr = theano.shared(np.float32(lr).astype(floatX))
        epsilon = theano.shared(np.float32(epsilon).astype(floatX))

        gradients = self.get_gradients(cost, params, consider_constant)
        gsums = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, gsum in zip(params, gradients, gsums):
            new_gsum = gsum + gradient ** 2.
            updates.append((gsum, new_gsum))
            # Scale the step by the freshly accumulated squared-gradient sum,
            # as in standard Adagrad (the original snippet used the stale gsum here).
            updates.append((param, param - lr * gradient / (T.sqrt(new_gsum + epsilon))))
        return updates 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 18, Source file: optimization.py

Example 3: adadelta

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
# (this example also relies on theano; floatX is typically theano.config.floatX)
def adadelta(self, cost, params, rho=0.95, epsilon=1e-6, consider_constant=None):
        """
        Adadelta. Based on:
        http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
        """
        rho = theano.shared(np.float32(rho).astype(floatX))
        epsilon = theano.shared(np.float32(epsilon).astype(floatX))

        gradients = self.get_gradients(cost, params, consider_constant)
        accu_gradients = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]
        accu_deltas = [theano.shared(np.zeros_like(param.get_value(borrow=True)).astype(floatX)) for param in params]

        updates = []
        for param, gradient, accu_gradient, accu_delta in zip(params, gradients, accu_gradients, accu_deltas):
            new_accu_gradient = rho * accu_gradient + (1. - rho) * gradient ** 2.
            delta_x = - T.sqrt((accu_delta + epsilon) / (new_accu_gradient + epsilon)) * gradient
            new_accu_delta = rho * accu_delta + (1. - rho) * delta_x ** 2.
            updates.append((accu_gradient, new_accu_gradient))
            updates.append((accu_delta, new_accu_delta))
            updates.append((param, param + delta_x))
        return updates 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 23, Source file: optimization.py

Example 4: rmsprop

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
# (this example also relies on theano; floatX is typically theano.config.floatX)
def rmsprop(self, cost, params, lr=0.001, rho=0.9, eps=1e-6, consider_constant=None):
        """
        RMSProp.
        """
        lr = theano.shared(np.float32(lr).astype(floatX))

        gradients = self.get_gradients(cost, params, consider_constant)
        accumulators = [theano.shared(np.zeros_like(p.get_value()).astype(np.float32)) for p in params]

        updates = []

        for param, gradient, accumulator in zip(params, gradients, accumulators):
            new_accumulator = rho * accumulator + (1 - rho) * gradient ** 2
            updates.append((accumulator, new_accumulator))

            new_param = param - lr * gradient / T.sqrt(new_accumulator + eps)
            updates.append((param, new_param))

        return updates 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 21, Source file: optimization.py

Example 5: data_augmentation

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
def data_augmentation(self, x_train):
        _, c, h, w = x_train.shape
        pad_h = h + 2 * self.pad_size
        pad_w = w + 2 * self.pad_size
        aug_data = np.zeros_like(x_train)
        for i, x in enumerate(x_train):
            pad_img = np.zeros((c, pad_h, pad_w))
            pad_img[:, self.pad_size:h+self.pad_size, self.pad_size:w+self.pad_size] = x

            # Randomly crop and horizontally flip the image
            top = np.random.randint(0, pad_h - h + 1)
            left = np.random.randint(0, pad_w - w + 1)
            bottom = top + h
            right = left + w
            if np.random.randint(0, 2):
                pad_img = pad_img[:, :, ::-1]

            aug_data[i] = pad_img[:, top:bottom, left:right]

        return aug_data 
Developer: sg-nm, Project: cgp-cnn, Lines: 22, Source file: cnn_train.py
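For reference, here is a hedged, standalone re-implementation of the same pad-crop-flip augmentation with pad_size as an explicit argument (the NCHW layout and default pad of 4 are assumptions carried over from typical CIFAR-style training):

import numpy as np

def augment(x_train, pad_size=4):
    # x_train: (N, C, H, W); returns a same-shape, same-dtype array.
    _, c, h, w = x_train.shape
    out = np.zeros_like(x_train)
    for i, x in enumerate(x_train):
        pad_img = np.zeros((c, h + 2 * pad_size, w + 2 * pad_size), dtype=x.dtype)
        pad_img[:, pad_size:pad_size + h, pad_size:pad_size + w] = x
        top = np.random.randint(0, 2 * pad_size + 1)   # random crop offsets
        left = np.random.randint(0, 2 * pad_size + 1)
        if np.random.randint(0, 2):                    # random horizontal flip
            pad_img = pad_img[:, :, ::-1]
        out[i] = pad_img[:, top:top + h, left:left + w]
    return out

batch = np.random.rand(8, 3, 32, 32).astype(np.float32)
print(augment(batch).shape)  # (8, 3, 32, 32)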

Example 6: train_lr_rfeinman

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
# (this example also uses sklearn.linear_model.LogisticRegressionCV)
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    Fit a logistic regression classifier that separates positive from negative
    samples, using density and uncertainty scores as the two features.
    :param densities_pos: density scores of the positive samples
    :param densities_neg: density scores of the negative samples
    :param uncerts_pos: uncertainty scores of the positive samples
    :param uncerts_neg: uncertainty scores of the negative samples
    :return: stacked feature matrix, labels, and the fitted classifier
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 27, Source file: util.py
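A quick synthetic smoke test for train_lr_rfeinman (the Gaussian feature values below are made up purely so the classifier has something to separate):

import numpy as np

rng = np.random.RandomState(0)
densities_neg = rng.normal(0.0, 1.0, size=200)
densities_pos = rng.normal(2.0, 1.0, size=200)
uncerts_neg = rng.normal(0.0, 1.0, size=200)
uncerts_pos = rng.normal(1.0, 1.0, size=200)

values, labels, lr = train_lr_rfeinman(
    densities_pos, densities_neg, uncerts_pos, uncerts_neg)
print(values.shape, labels.shape)  # (400, 2) (400,)
print(lr.score(values, labels))    # in-sample accuracy, well above 0.5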

Example 7: compute_roc_rfeinman

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
# (this example also uses sklearn.metrics.roc_curve/auc and matplotlib.pyplot as plt)
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    Compute the ROC curve and AUC for the given negative/positive scores.
    :param probs_neg: scores assigned to the negative samples
    :param probs_pos: scores assigned to the positive samples
    :param plot: if True, plot the ROC curve with matplotlib
    :return: false positive rates, true positive rates, and the AUC score
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 25, Source file: util.py
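A matching smoke test for compute_roc_rfeinman with synthetic scores (the overlapping uniform ranges are an arbitrary assumption):

import numpy as np

rng = np.random.RandomState(0)
probs_neg = rng.uniform(0.0, 0.6, size=300)  # negatives tend to score low
probs_pos = rng.uniform(0.4, 1.0, size=300)  # positives tend to score high

fpr, tpr, auc_score = compute_roc_rfeinman(probs_neg, probs_pos, plot=False)
print('AUC = %.4f' % auc_score)  # well above 0.5 for these scores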

Example 8: numerical_gradient

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
def numerical_gradient(f, x):
    h = 1e-4
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        # compute f(x+h)
        x[idx] = tmp_val + h
        fxh1 = f(x)
        # compute f(x-h)
        x[idx] = tmp_val - h
        fxh2 = f(x)

        grad[idx] = (fxh1 - fxh2) / (2 * h)
        x[idx] = tmp_val

    return grad


# gradient descent
Developer: wdxtub, Project: deep-learning-note, Lines: 22, Source file: 7_gradient.py
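A quick check of numerical_gradient against the analytic gradient of f(x) = sum(x**2), which is 2x. Note that x must be a float array, because the function temporarily writes x[idx] ± h back into it:

import numpy as np

def f(x):
    return np.sum(x ** 2)

x = np.array([3.0, 4.0])
print(numerical_gradient(f, x))  # approximately [6. 8.]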

Example 9: _numerical_gradient_1d

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
def _numerical_gradient_1d(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    for idx in range(x.size):
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value

    return grad 
Developer: wdxtub, Project: deep-learning-note, Lines: 18, Source file: gradient.py

Example 10: numerical_gradient

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
def numerical_gradient(f, x):
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()

    return grad 
Developer: wdxtub, Project: deep-learning-note, Lines: 21, Source file: gradient.py
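Unlike the 1-D variant, this np.nditer version walks multi_index, so it also handles matrices and higher-dimensional arrays:

import numpy as np

def f(x):
    return np.sum(x ** 2)

W = np.array([[1.0, 2.0], [3.0, 4.0]])
print(numerical_gradient(f, W))
# approximately:
# [[2. 4.]
#  [6. 8.]]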

Example 11: update

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
# (this example also uses mxnet as mx; it is a method of an FTRL optimizer test class)
def update(self, index, weight, grad, state):
        self._update_count(index)
        wd = self._get_wd(index)
        lr = self._get_lr(index)
        num_rows = weight.shape[0]

        dn, n = state
        for row in range(num_rows):
            all_zeros = mx.test_utils.almost_equal(grad[row].asnumpy(), np.zeros_like(grad[row].asnumpy()))
            if all_zeros and self.lazy_update:
                continue
            grad[row] = grad[row] * self.rescale_grad
            if self.clip_gradient is not None:
                mx.nd.clip(grad[row], -self.clip_gradient, self.clip_gradient, out=grad[row])

            # update dn and n
            dn[row] += grad[row] - (mx.nd.sqrt(n[row] + grad[row] * grad[row]) - mx.nd.sqrt(n[row])) * weight[row] / lr
            n[row] += grad[row] * grad[row]

            # update weight
            weight[row] = (mx.nd.sign(dn[row]) * self.lamda1 - dn[row]) / \
                          ((self.beta + mx.nd.sqrt(n[row])) / lr + wd) * (mx.nd.abs(dn[row]) > self.lamda1) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 24, Source file: test_optimizer.py

Example 12: conjugate_gradient

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
def conjugate_gradient(f_Ax, b, cg_iters=10, residual_tol=1e-10):
  # Solves A x = b, given only the matrix-vector product f_Ax(v) = A v.
  p = b.copy()
  r = b.copy()
  x = np.zeros_like(b)
  rdotr = r.dot(r)
  for i in range(cg_iters):
    z = f_Ax(p)
    v = rdotr / p.dot(z)
    x += v * p
    r -= v * z
    newrdotr = r.dot(r)
    mu = newrdotr / rdotr
    p = r + mu * p
    rdotr = newrdotr
    if rdotr < residual_tol:
      break
  return x 
Developer: ringringyi, Project: DOTA_models, Lines: 19, Source file: trust_region.py
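conjugate_gradient never needs the matrix A itself, only the product f_Ax(v) = A v. A small sanity check on a symmetric positive definite system against np.linalg.solve (the 5x5 random system is an arbitrary assumption):

import numpy as np

rng = np.random.RandomState(0)
M = rng.rand(5, 5)
A = M @ M.T + 5 * np.eye(5)  # symmetric positive definite
b = rng.rand(5)

x = conjugate_gradient(lambda v: A @ v, b, cg_iters=50)
print(np.allclose(x, np.linalg.solve(A, b), atol=1e-6))  # True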

Example 13: count_super

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
def count_super(p, m, counters, preds, labels, label_to_ch):
    
    for l in np.unique(labels):
        preds_l = preds[labels == l]
        
        # in -> known
        if label_to_ch[l]:
            acc = np.zeros_like(preds_l, dtype=bool)
            for c in label_to_ch[l]:
                if p == 0: counters['data'][m][c] += preds_l.shape[0]
                acc |= (preds_l == c)
            acc_sum = acc.sum()
            for c in label_to_ch[l]:
                counters['acc'][p,m][c] += acc_sum
        
        # out -> novel
        else:
            if p == 0: counters['data'][m][-1] += preds_l.shape[0]
            acc_sum = (preds_l < 0).sum()
            counters['acc'][p,m][-1] += acc_sum 
Developer: kibok90, Project: cvpr2018-hnd, Lines: 22, Source file: test.py

Example 14: evaluate

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
# (this method assumes a compiled Keras-style model at self.model)
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
        all_loss = []
        for i in range(10):  # iterate over each station; tensors are (sample_ind, timestep, stationID, features)
            # batch_placeholders = np.zeros_like(data_labels[:,:,i,:])
            val_loss = self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:], data_ids[:,:,i], data_time],
                                y=[data_labels[:,:,i,:]], verbose=False)

            all_loss.append(val_loss)

            if each_station_display:
                print('\tFor station 9000{}, val loss: {}'.format(i+1, val_loss))

        self.current_mean_val_loss = np.mean(all_loss)
        print('Mean val loss:', self.current_mean_val_loss)

        self.val_loss_list.append(self.current_mean_val_loss) 
Developer: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 18, Source file: seq2seq_class.py

Example 15: evaluate

# Required module: import numpy [as alias]
# Or: from numpy import zeros_like [as alias]
# (this method assumes a compiled Keras-style model at self.model)
def evaluate(self, data_input_obs, data_input_ruitu, data_labels, data_ids, data_time, each_station_display=False):
        all_loss = []
        for i in range(10):  # iterate over each station; tensors are (sample_ind, timestep, stationID, features)
            # batch_placeholders = np.zeros_like(data_labels[:,:,i,:])
            val_loss = self.model.evaluate(x=[data_input_obs[:,:,i,:], data_input_ruitu[:,:,i,:], data_ids[:,:,i], data_time],
                                y=[data_labels[:,:,i,:]], verbose=False)

            all_loss.append(val_loss)

            if each_station_display:
                print('\tFor station 9000{}, val MLE loss: {}'.format(i+1, val_loss))

        self.current_mean_val_loss = np.mean(all_loss)
        print('Mean val MLE loss:', self.current_mean_val_loss)

        self.val_loss_list.append(self.current_mean_val_loss) 
Developer: BruceBinBoxing, Project: Deep_Learning_Weather_Forecasting, Lines: 18, Source file: competition_model_class.py


Note: The numpy.zeros_like examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For redistribution and use, please follow the corresponding project's license. Do not repost without permission.