

Python numpy.full Method Code Examples

This article collects typical usage examples of the numpy.full method from the Python module autograd.numpy. If you are wondering how exactly to use numpy.full, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the autograd.numpy module.


The following presents 14 code examples of the numpy.full method, ordered by popularity by default.
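
Before the individual examples, here is a minimal standalone sketch of the call itself (written against plain autograd.numpy; the shapes and fill values are illustrative):

import autograd.numpy as np

# 1-D array of length 3, every entry 0.5
print(np.full(3, 0.5))        # [0.5 0.5 0.5]

# 2x3 array filled with 7.0 (dtype inferred from the fill value)
print(np.full((2, 3), 7.0))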

Example 1: plot_images

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def plot_images(images, ax, ims_per_row=5, padding=5, digit_dimensions=(28, 28),
                cmap=matplotlib.cm.binary, vmin=None, vmax=None):
    """Images should be a (N_images x pixels) matrix."""
    N_images = images.shape[0]
    N_rows = (N_images - 1) // ims_per_row + 1
    pad_value = np.min(images.ravel())
    concat_images = np.full(((digit_dimensions[0] + padding) * N_rows + padding,
                             (digit_dimensions[1] + padding) * ims_per_row + padding), pad_value)
    for i in range(N_images):
        cur_image = np.reshape(images[i, :], digit_dimensions)
        row_ix = i // ims_per_row
        col_ix = i % ims_per_row
        row_start = padding + (padding + digit_dimensions[0]) * row_ix
        col_start = padding + (padding + digit_dimensions[1]) * col_ix
        concat_images[row_start: row_start + digit_dimensions[0],
                      col_start: col_start + digit_dimensions[1]] = cur_image
    cax = ax.matshow(concat_images, cmap=cmap, vmin=vmin, vmax=vmax)
    plt.xticks(np.array([]))
    plt.yticks(np.array([]))
    return cax 
Developer ID: HIPS, Project: autograd, Lines of code: 22, Source: data.py
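
A hedged usage sketch for plot_images (the random "digits" and figure setup below are illustrative, not taken from the original data.py):

import matplotlib
import matplotlib.pyplot as plt
import autograd.numpy as np
import autograd.numpy.random as npr

# Assumes plot_images from Example 1 is defined in the current module.
images = npr.rand(10, 28 * 28)        # 10 fake flattened 28x28 images
fig, ax = plt.subplots()
plot_images(images, ax, ims_per_row=5)
plt.show()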

Example 2: __init__

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def __init__(self, sigma2s, wts=None):
        """
        Mixture of isotropic Gaussian kernels:
          sum wts[i] * exp(- ||x - y||^2 / (2 * sigma2s[i]))

        sigma2s: a list/array of squared bandwidths
        wts: a list/array of weights. Defaults to equal weights summing to 1.
        """
        self.sigma2s = sigma2s = np.asarray(sigma2s)
        assert len(sigma2s) > 0

        if wts is None:
            self.wts = wts = np.full(len(sigma2s), 1/len(sigma2s))
        else:
            self.wts = wts = np.asarray(wts)
            assert len(wts) == len(sigma2s)
            assert all(w >= 0 for w in wts) 
Developer ID: wittawatj, Project: kernel-gof, Lines of code: 19, Source: kernel.py
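
The np.full call in the default-weights branch simply builds equal weights that sum to 1; a standalone sketch of that branch (the kernel class itself is omitted here):

import autograd.numpy as np

sigma2s = np.array([0.5, 1.0, 2.0])
wts = np.full(len(sigma2s), 1 / len(sigma2s))   # equal weights
print(wts)          # [0.3333... 0.3333... 0.3333...]
print(wts.sum())    # 1.0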

Example 3: setup

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def setup(self, x_shape):
        """
        Parameters
        ----------
        x_shape : np.array(batch size, time steps, input shape)
        """
        self.input_dim = x_shape[2]

        # Input -> Hidden
        self._params["W"] = self._params.init((self.input_dim, self.hidden_dim))
        # Bias
        self._params["b"] = np.full((self.hidden_dim,), self._params.initial_bias)
        # Hidden -> Hidden layer
        self._params["U"] = self.inner_init((self.hidden_dim, self.hidden_dim))

        # Init gradient arrays
        self._params.init_grad()

        self.hprev = np.zeros((x_shape[0], self.hidden_dim)) 
Developer ID: rushter, Project: MLAlgorithms, Lines of code: 21, Source: rnn.py

Example 4: get_scale

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def get_scale(n, scale_factor):
        return anp.power(anp.full(n, scale_factor), anp.arange(n)) 
Developer ID: msu-coinlab, Project: pymoo, Lines of code: 4, Source: dtlz.py
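
What the one-liner above computes, sketched standalone: anp.full builds a constant base vector and anp.power raises it element-wise to the exponents 0..n-1, yielding a geometric scaling vector.

import autograd.numpy as anp

n, scale_factor = 5, 10.0
scale = anp.power(anp.full(n, scale_factor), anp.arange(n))
print(scale)   # powers of 10: 1, 10, 100, 1000, 10000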

Example 5: get_power

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def get_power(self, n):
        p = anp.full(n, 4.0)
        p[-1] = 2.0
        return p 
Developer ID: msu-coinlab, Project: pymoo, Lines of code: 6, Source: dtlz.py
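
Here anp.full is followed by an in-place assignment, giving a vector that is constant except for its last entry; a quick standalone check:

import autograd.numpy as anp

p = anp.full(6, 4.0)
p[-1] = 2.0
print(p)   # [4. 4. 4. 4. 4. 2.]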

Example 6: _calc_pareto_set

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def _calc_pareto_set(self):
        return np.full(self.n_var, 0) 
Developer ID: msu-coinlab, Project: pymoo, Lines of code: 4, Source: griewank.py

Example 7: _calc_pareto_set

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def _calc_pareto_set(self):
        return anp.full(self.n_var, 0) 
Developer ID: msu-coinlab, Project: pymoo, Lines of code: 4, Source: ackley.py

Example 8: _calc_pareto_set

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def _calc_pareto_set(self):
        return anp.full(self.n_var, 0.5) 
Developer ID: msu-coinlab, Project: pymoo, Lines of code: 4, Source: sphere.py

Example 9: _calc_pareto_set

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def _calc_pareto_set(self):
        return np.full(self.n_var, 420.9687) 
Developer ID: msu-coinlab, Project: pymoo, Lines of code: 4, Source: schwefel.py

Example 10: make_grad_softplus

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def make_grad_softplus(ans, x):
    x = np.asarray(x)
    def gradient_product(g):
        return np.full(x.shape, g) * np.exp(x - ans)
    return gradient_product 
Developer ID: dtak, Project: tree-regularization-public, Lines of code: 7, Source: model.py
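
The snippet above only builds the VJP closure; here is a hedged sketch of how it could be wired up with autograd's extension API (the softplus primitive below is illustrative and may differ from how the original model.py registers it):

import autograd.numpy as np
from autograd import grad
from autograd.extend import primitive, defvjp

@primitive
def softplus(x):
    # log(1 + exp(x)), computed stably
    return np.logaddexp(0.0, x)

def make_grad_softplus(ans, x):
    x = np.asarray(x)
    def gradient_product(g):
        # d softplus / dx = sigmoid(x) = exp(x - softplus(x))
        return np.full(x.shape, g) * np.exp(x - ans)
    return gradient_product

defvjp(softplus, make_grad_softplus)

print(grad(softplus)(1.5))   # ~0.8176, i.e. sigmoid(1.5)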

Example 11: logsumexp_vjp

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def logsumexp_vjp(ans, x):
    # If you want to be able to take higher-order derivatives, then all the
    # code inside this function must be itself differentiable by Autograd.
    # This closure multiplies g with the Jacobian of logsumexp (d_ans/d_x).
    # Because Autograd uses reverse-mode differentiation, g contains
    # the gradient of the objective w.r.t. ans, the output of logsumexp.
    # This returned VJP function doesn't close over `x`, so Python can
    # garbage-collect `x` if there are no references to it elsewhere.
    x_shape = x.shape
    return lambda g: np.full(x_shape, g) * np.exp(x - np.full(x_shape, ans))

# Now we tell Autograd that logsumexp has a gradient-making function. 
Developer ID: HIPS, Project: autograd, Lines of code: 14, Source: define_gradient.py
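
The closing comment points at the registration step; a reconstructed sketch of how define_gradient.py continues (assuming logsumexp_vjp from the snippet above is in scope; the exact logsumexp body is an approximation):

import autograd.numpy as np
from autograd import grad
from autograd.extend import primitive, defvjp

@primitive
def logsumexp(x):
    """Numerically stable log(sum(exp(x)))."""
    max_x = np.max(x)
    return max_x + np.log(np.sum(np.exp(x - max_x)))

# Register the custom VJP defined in Example 11.
defvjp(logsumexp, logsumexp_vjp)

def example_func(y):
    z = y ** 2
    return np.sum(logsumexp(z))

print(grad(example_func)(np.array([1.5, 6.7, 1e-10])))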

Example 12: uniform_reference_directions

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def uniform_reference_directions(self, n_partitions, n_dim):
        ref_dirs = []
        ref_dir = anp.full(n_dim, anp.inf)
        self.__uniform_reference_directions(ref_dirs, ref_dir, n_partitions, n_partitions, 0)
        return anp.concatenate(ref_dirs, axis=0) 
Developer ID: msu-coinlab, Project: pymop, Lines of code: 7, Source: util.py

Example 13: get_power

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def get_power(n):
        p = anp.full(n, 4.0)
        p[-1] = 2.0
        return p 
Developer ID: msu-coinlab, Project: pymop, Lines of code: 6, Source: dtlz.py

Example 14: setup

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import full [as alias]
def setup(self, x_shape):
        """
        Naming convention:
        i : input gate
        f : forget gate
        c : cell
        o : output gate

        Parameters
        ----------
        x_shape : np.array(batch size, time steps, input shape)
        """
        self.input_dim = x_shape[2]
        # Input -> Hidden
        W_params = ["W_i", "W_f", "W_o", "W_c"]
        # Hidden -> Hidden
        U_params = ["U_i", "U_f", "U_o", "U_c"]
        # Bias terms
        b_params = ["b_i", "b_f", "b_o", "b_c"]

        # Initialize params
        for param in W_params:
            self._params[param] = self._params.init((self.input_dim, self.hidden_dim))

        for param in U_params:
            self._params[param] = self.inner_init((self.hidden_dim, self.hidden_dim))

        for param in b_params:
            self._params[param] = np.full((self.hidden_dim,), self._params.initial_bias)

        # Combine weights for simplicity
        self.W = [self._params[param] for param in W_params]
        self.U = [self._params[param] for param in U_params]

        # Init gradient arrays for all weights
        self._params.init_grad()

        self.hprev = np.zeros((x_shape[0], self.hidden_dim))
        self.oprev = np.zeros((x_shape[0], self.hidden_dim)) 
Developer ID: rushter, Project: MLAlgorithms, Lines of code: 41, Source: lstm.py


Note: The autograd.numpy.full method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code, and do not republish without permission.