

Python numpy.atleast_2d Function Code Examples

This article collects typical usage examples of the numpy.atleast_2d function in Python. If you have been wondering what atleast_2d is for, how to call it, or what real-world uses look like, the hand-picked code examples below should help.


The following shows 15 code examples of the atleast_2d function, sorted by popularity.
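
As a quick refresher before the examples: np.atleast_2d views its inputs as arrays with at least two dimensions, turning scalars into shape (1, 1) and 1-D arrays of length N into shape (1, N), while passing 2-D and higher-dimensional arrays through unchanged. A minimal, self-contained sketch:

import numpy as np

np.atleast_2d(3.0).shape           # (1, 1): a scalar becomes a 1x1 array
np.atleast_2d([1, 2, 3]).shape     # (1, 3): a 1-D input becomes a row vector
np.atleast_2d(np.eye(3)).shape     # (3, 3): 2-D (or higher) input is returned unchanged

# A common idiom throughout the examples below: turn a 1-D vector into a
# column by transposing the 2-D view.
col = np.atleast_2d(np.arange(5)).T    # shape (5, 1)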

Example 1: fit

    def fit(self, X, y, learning_rate=0.2, epochs=10000):
        X = np.atleast_2d(X)
        temp = np.ones([X.shape[0], X.shape[1]+1])
        temp[:, 0:-1] = X
        X = temp
        y = np.array(y)

        for k in range(epochs):
            i = np.random.randint(X.shape[0])
            a = [X[i]]

            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l], self.weights[l])))

            error = y[i] - a[-1]
            # deltas stores the gradient terms
            deltas = [error*self.activation_deriv(a[-1])]

            # compute the gradient terms for the hidden layers
            for l in range(len(a)-2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T) * self.activation_deriv(a[l]))
            deltas.reverse()
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate*layer.T.dot(delta)
Developer: su-kaiyao, Project: gold, Lines: 26, Source: neural_network.py
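
A minimal, self-contained sketch (not taken from the project above) of the bias-column idiom at the top of fit: atleast_2d guarantees X is 2-D before a trailing column of ones is appended as the bias input.

import numpy as np

X = np.atleast_2d([0, 1])                        # a single sample becomes shape (1, 2)
temp = np.ones((X.shape[0], X.shape[1] + 1))
temp[:, 0:-1] = X                                # copy the features, keep a trailing column of ones
X = temp                                         # shape (1, 3): features plus a constant bias input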

Example 2: validate

    def validate(pos=None, text=None, anchor=None,
                 data_bounds=None,
                 ):

        if text is None:
            text = []
        if isinstance(text, string_types):
            text = [text]
        if pos is None:
            pos = np.zeros((len(text), 2))

        assert pos is not None
        pos = np.atleast_2d(pos)
        assert pos.ndim == 2
        assert pos.shape[1] == 2
        n_text = pos.shape[0]
        assert len(text) == n_text

        anchor = anchor if anchor is not None else (0., 0.)
        anchor = np.atleast_2d(anchor)
        if anchor.shape[0] == 1:
            anchor = np.repeat(anchor, n_text, axis=0)
        assert anchor.ndim == 2
        assert anchor.shape == (n_text, 2)

        if data_bounds is not None:
            data_bounds = _get_data_bounds(data_bounds, pos)
            assert data_bounds.shape[0] == n_text
            data_bounds = data_bounds.astype(np.float64)
            assert data_bounds.shape == (n_text, 4)

        return Bunch(pos=pos, text=text, anchor=anchor,
                     data_bounds=data_bounds)
Developer: kwikteam, Project: phy, Lines: 33, Source: visuals.py
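
A quick, numpy-only illustration (independent of phy) of the normalization that validate performs: a single (x, y) pair is promoted to shape (1, 2), and a single anchor is then repeated once per text item.

import numpy as np

pos = np.atleast_2d((10.0, 20.0))              # single (x, y) point -> shape (1, 2)
anchor = np.atleast_2d((0.0, 0.0))             # shape (1, 2)
n_text = pos.shape[0]
anchor = np.repeat(anchor, n_text, axis=0)     # one anchor row per text item
assert anchor.shape == (n_text, 2)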

Example 3: hausdorffnorm

def hausdorffnorm(A, B):
    '''
    Finds the hausdorff norm between two matrices A and B.
    INPUTS:
    A: numpy array
    B : numpy array
    OUTPUTS:
    Hausdorff norm between matrices A and B
    '''
    # ensure matrices are 3 dimensional, and shaped conformably
    if len(A.shape) == 1:
        A = np.atleast_2d(A)

    if len(B.shape) == 1:
        B = np.atleast_2d(B)

    A = np.atleast_3d(A)
    B = np.atleast_3d(B)

    x, y, z = B.shape
    A = np.reshape(A, (z, x, y))
    B = np.reshape(B, (z, x, y))

    # find hausdorff norm: starting from A to B
    z, x, y = B.shape
    temp1 = np.tile(np.reshape(B.T, (y, z, x)), (max(A.shape), 1))
    temp2 = np.tile(np.reshape(A.T, (y, x, z)), (1, max(B.shape)))
    D1 = np.min(np.sqrt(np.sum((temp1-temp2)**2, 0)), axis=0)

    # starting from B to A
    temp1 = np.tile(np.reshape(A.T, (y, z, x)), (max(B.shape), 1))
    temp2 = np.tile(np.reshape(B.T, (y, x, z)), (1, max(A.shape)))
    D2 = np.min(np.sqrt(np.sum((temp1-temp2)**2, 0)), axis=0)

    return np.max([D1, D2])
Developer: btengels, Project: supergametools, Lines: 35, Source: supergametools.py
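
The tile/reshape bookkeeping above can be hard to follow. As a point of comparison, here is a shorter brute-force sketch of the same Hausdorff-distance idea (my own illustration, not code from supergametools), assuming A and B are point sets with one point per row:

import numpy as np

def hausdorff_direct(A, B):
    """Brute-force Hausdorff distance between two point sets (rows are points)."""
    A = np.atleast_2d(A)
    B = np.atleast_2d(B)
    # pairwise Euclidean distances, shape (len(A), len(B))
    D = np.sqrt(((A[:, None, :] - B[None, :, :]) ** 2).sum(axis=-1))
    d_ab = D.min(axis=1).max()   # farthest A point from its nearest B point
    d_ba = D.min(axis=0).max()   # farthest B point from its nearest A point
    return max(d_ab, d_ba)

hausdorff_direct([[0, 0], [1, 0]], [[0, 1], [2, 0]])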

Example 4: bo_

def bo_(x_obs, y_obs):
    kernel = kernels.Matern() + kernels.WhiteKernel()
    gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=16)
    gp.fit(x_obs, y_obs)

    xs = list(repeat(np.atleast_2d(np.linspace(0, 10, 128)).T, 2))
    x = cartesian_product(*xs)

    a = a_EI(gp, x_obs=x_obs, y_obs=y_obs)

    argmin_a_x = x[np.argmax(a(x))]

    # heavy evaluation
    print("f({})".format(argmin_a_x))
    f_argmin_a_x = f2d(np.atleast_2d(argmin_a_x))


    plot_2d(gp, x_obs, y_obs, argmin_a_x, a, xs)
    plt.show()


    bo_(
        x_obs=np.vstack((x_obs, argmin_a_x)),
        y_obs=np.hstack((y_obs, f_argmin_a_x)),
    )
Developer: Jim-Holmstroem, Project: bayesian-optimization, Lines: 25, Source: poc.py
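
The np.atleast_2d(np.linspace(...)).T pattern above is the usual trick for turning a 1-D grid into the (n_samples, 1) column layout that scikit-learn estimators such as GaussianProcessRegressor expect. A standalone numpy sketch:

import numpy as np

grid = np.linspace(0, 10, 128)       # shape (128,)
X = np.atleast_2d(grid).T            # shape (128, 1): one sample per row, one feature
# equivalent to grid.reshape(-1, 1)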

Example 5: fit

    def fit(self, x, y, learning_rate=0.2, epochs=10000):
        x = np.atleast_2d(x)
        # print x.shape[0], x.shape[1]+1
        temp = np.ones([x.shape[0], x.shape[1] + 1])
        temp[:, 0:-1] = x
        x = temp
        y = np.array(y)

        for k in range(epochs):
            i = np.random.randint(x.shape[0])
            a = [x[i]]

            for l in range(len(self.weights)):
                a.append(self.activation(np.dot(a[l],self.weights[l])))
            error = y[i] - a[-1]
            deltas = [error * self.activation_deriv(a[-1])]

            for l in range(len(a) - 2, 0, -1):
                deltas.append(deltas[-1].dot(self.weights[l].T)*self.activation_deriv(a[l]))

            deltas.reverse()
            for i in range(len(self.weights)):
                layer = np.atleast_2d(a[i])
                delta = np.atleast_2d(deltas[i])
                self.weights[i] += learning_rate * layer.T.dot(delta)
Developer: dds0414, Project: pyWorkspace, Lines: 25, Source: neuralNetwork.py

Example 6: predict_proba

    def predict_proba(self, X):
        """Predict probability for each possible outcome.

        Compute the probability estimates for each single sample in X
        and each possible outcome seen during training (categorical
        distribution).

        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]

        Returns
        -------
        probabilities : array, shape = [n_samples, n_classes]
            Normalized probability distributions across
            class labels
        """
        if sparse.isspmatrix(X):
            X_2d = X
        else:
            X_2d = np.atleast_2d(X)
        weight_matrices = self._get_kernel(self.X_, X_2d)
        if self.kernel == 'knn':
            probabilities = []
            for weight_matrix in weight_matrices:
                ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
                probabilities.append(ine)
            probabilities = np.array(probabilities)
        else:
            weight_matrices = weight_matrices.T
            probabilities = np.dot(weight_matrices, self.label_distributions_)
        normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
        probabilities /= normalizer
        return probabilities
Developer: 93sam, Project: scikit-learn, Lines: 34, Source: label_propagation.py
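
The normalizer line is worth isolating: np.atleast_2d(np.sum(probabilities, axis=1)).T builds a column vector of row sums so that the division broadcasts row-wise. A small standalone sketch with made-up values:

import numpy as np

probabilities = np.array([[1.0, 3.0],
                          [2.0, 2.0]])
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T   # row sums as a (2, 1) column
probabilities /= normalizer                                   # each row now sums to 1
# equivalently: probabilities / probabilities.sum(axis=1, keepdims=True)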

Example 7: propagate_backward

    def propagate_backward(self, target, lrate=0.1, momentum=0.1):
        ''' Back propagate error related to target using lrate. '''

        deltas = []

        # Compute error on output layer
        error = target - self.layers[-1]
        delta = error*dsigmoid(self.layers[-1])
        deltas.append(delta)

        # Compute error on hidden layers
        for i in range(len(self.shape)-2,0,-1):
            delta = np.dot(deltas[0],self.weights[i].T)*dsigmoid(self.layers[i])
            deltas.insert(0,delta)
            
        # Update weights
        for i in range(len(self.weights)):
            layer = np.atleast_2d(self.layers[i])
            delta = np.atleast_2d(deltas[i])
            dw = np.dot(layer.T,delta)
            self.weights[i] += lrate*dw + momentum*self.dw[i]
            self.dw[i] = dw

        # Return error
        return (error**2).sum()
Developer: rainbowfighter, Project: MLP, Lines: 25, Source: sin_v6.py
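
The weight update uses atleast_2d to turn two 1-D vectors into a row and a column so that their matrix product becomes an outer product. A minimal sketch with hypothetical values:

import numpy as np

layer = np.array([0.5, 0.1, 0.9])    # activations of one layer, shape (3,)
delta = np.array([0.2, -0.3])        # error terms of the next layer, shape (2,)

dw = np.dot(np.atleast_2d(layer).T, np.atleast_2d(delta))   # shape (3, 2)
assert np.allclose(dw, np.outer(layer, delta))              # same result as an outer product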

Example 8: _sanitize_pixel_positions

def _sanitize_pixel_positions(positions):

    if isinstance(positions, u.Quantity):
        if positions.unit is u.pixel:
            positions = positions.value
        else:
            raise u.UnitsError("positions should be in pixel units")

    if isinstance(positions, u.Quantity):
        positions = positions.value
    elif isinstance(positions, (list, tuple, np.ndarray)):
        positions = np.atleast_2d(positions)
        if positions.shape[1] != 2:
            if positions.shape[0] == 2:
                positions = np.transpose(positions)
            else:
                raise TypeError("List or array of (x, y) pixel coordinates "
                                "is expected got '{0}'.".format(positions))
    elif isinstance(positions, zip):
        # This is needed for zip to work seamlessly in Python 3
        positions = np.atleast_2d(list(positions))
    else:
        raise TypeError("List or array of (x, y) pixel coordinates "
                        "is expected got '{0}'.".format(positions))

    if positions.ndim > 2:
        raise ValueError('{0}-d position array not supported. Only 2-d '
                         'arrays supported.'.format(positions.ndim))

    return positions
Developer: mahmoud-lsw, Project: photutils, Lines: 30, Source: aperture_core.py
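
A few numpy-only examples (independent of photutils) of how atleast_2d regularizes the accepted position formats into an (N, 2) array:

import numpy as np

np.atleast_2d((3.0, 4.0)).shape                 # a single (x, y) pair  -> (1, 2)
np.atleast_2d([(1, 2), (3, 4), (5, 6)]).shape   # a list of pairs       -> (3, 2)

# zip objects are not sequences in Python 3, so they are materialized first:
x, y = [1, 2, 3], [4, 5, 6]
np.atleast_2d(list(zip(x, y))).shape            # (3, 2)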

Example 9: _cmeans_predict0

def _cmeans_predict0(test_data, cntr, u_old, c, m):
    """
    Single step in fuzzy c-means prediction algorithm. Clustering algorithm
    modified from Ross, Fuzzy Logic w/Engineering Applications (2010)
    p.352-353, equations 10.28 - 10.35, but this method to generate fuzzy
    predictions was independently derived by Josh Warner.

    Parameters inherited from cmeans()

    Very similar to initial clustering, except `cntr` is not updated, thus
    the new test data are forced into known (trained) clusters.

    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)

    um = u_old ** m
    test_data = test_data.T

    # For prediction, we do not recalculate cluster centers. The test_data is
    # forced to conform to the prior clustering.

    d = _distance(test_data, cntr)
    d = np.fmax(d, np.finfo(float).eps)

    jm = (um * d ** 2).sum()

    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))

    return u, jm, d
Developer: timesofbadri, Project: scikit-fuzzy, Lines: 32, Source: _cmeans.py
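
The normalization idiom np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0))) expands the column sums of u into a full (c, n) matrix; dividing by it makes every column of u sum to one. A standalone sketch (random data for illustration) showing that it matches plain broadcasting:

import numpy as np

c, n = 3, 5
u = np.random.rand(c, n)                  # membership matrix: c clusters x n points

norm = np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))   # column sums tiled to (c, n)
u1 = u / norm

u2 = u / u.sum(axis=0, keepdims=True)     # the same thing via broadcasting
assert np.allclose(u1, u2)                # every column of u1 (and u2) sums to 1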

Example 10: _cmeans0

def _cmeans0(data, u_old, c, m):
    """
    Single step in generic fuzzy c-means clustering algorithm. Modified from
    Ross, Fuzzy Logic w/Engineering Applications (2010) p.352-353, equations
    10.28 - 10.35.

    Parameters inherited from cmeans()

    This algorithm is a ripe target for Cython.

    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)

    um = u_old ** m

    # Calculate cluster centers
    data = data.T
    cntr = um.dot(data) / (np.ones((data.shape[1],
                                    1)).dot(np.atleast_2d(um.sum(axis=1))).T)

    d = _distance(data, cntr)
    d = np.fmax(d, np.finfo(float).eps)

    jm = (um * d ** 2).sum()

    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))

    return cntr, u, jm, d
Developer: timesofbadri, Project: scikit-fuzzy, Lines: 31, Source: _cmeans.py

Example 11: _compute_model

 def _compute_model(self, pset):
     """Computes a model and inserts results into the Mongo collection."""
     nBands = fsps.driver.get_n_bands()
     nLambda = fsps.driver.get_n_lambda()
     nAges = fsps.driver.get_n_ages()
     fsps.driver.comp_sp(pset['dust_type'], pset['zmet'], pset['sfh'],
         pset['tau'], pset['const'], pset['fburst'], pset['tburst'],
         pset['dust_tesc'], pset['dust1'], pset['dust2'],
         pset['dust_clumps'], pset['frac_nodust'], pset['dust_index'],
         pset['mwr'], pset['wgp1'], pset['wgp2'], pset['wgp3'],
         pset['duste_gamma'], pset['duste_umin'], pset['duste_qpah'],
         pset['tage'])
     if pset['tage'] == 0.:
         # SFH over all ages is returned
         mags = fsps.driver.get_csp_mags(nBands, nAges)
         specs = fsps.driver.get_csp_specs(nLambda, nAges)
         age, mass, lbol, sfr, dust_mass = fsps.driver.get_csp_stats(nAges)
     else:
         # get only a single age, stored in first age bin
         # arrays must be re-formated to appear like one-age versions of
         # the outputs from get_csp_mags, etc.
         mags = fsps.driver.get_csp_mags_at_age(1, nBands)
         specs = fsps.driver.get_csp_specs_at_age(1, nLambda)
         age, mass, lbol, sfr, dust_mass \
                 = fsps.driver.get_csp_stats_at_age(1)
         age = np.atleast_1d(age)
         mass = np.atleast_1d(mass)
         lbol = np.atleast_1d(lbol)
         sfr = np.atleast_1d(sfr)
         dust_mass = np.atleast_1d(dust_mass)
         mags = np.atleast_2d(mags)
         specs = np.atleast_2d(specs)
     dataArray = self._splice_mag_spec_arrays(age, mass, lbol, sfr,
             dust_mass, mags, specs, nLambda)
     self._insert_model(pset.name, dataArray)
Developer: jonathansick, Project: pySPS, Lines: 35, Source: splib.py
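
Since fsps is not generally installed, here is a numpy-only sketch with made-up shapes of why the single-age branch promotes its outputs with atleast_1d/atleast_2d: it gives them the same dimensionality as the multi-age outputs, so the downstream splicing code can treat both cases uniformly.

import numpy as np

mags_all = np.random.rand(4, 5)               # multi-age case: 4 ages x 5 bands
mass_all = np.random.rand(4)                  # one mass per age

mass_one = np.atleast_1d(0.7)                 # single-age scalar      -> shape (1,)
mags_one = np.atleast_2d(np.random.rand(5))   # single-age band vector -> shape (1, 5)

assert mags_one.shape[1] == mags_all.shape[1]   # same per-row layout in both cases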

Example 12: assert_equal_from_matlab

def assert_equal_from_matlab(a, b, options=None):
    # Compares a and b for equality. They are all going to be numpy
    # types. hdf5storage and scipy behave differently when importing
    # arrays as to whether they are 2D or not, so we will make them all
    # at least 2D regardless. For strings, the two packages produce
    # transposed results of each other, so one just needs to be
    # transposed. For object arrays, each element must be iterated over
    # to be compared. For structured ndarrays, their fields need to be
    # compared and then they can be compared element and field
    # wise. Otherwise, they can be directly compared. Note, the type is
    # often converted by scipy (or on route to the file before scipy
    # gets it), so comparisons are done by value, which is not perfect.
    a = np.atleast_2d(a)
    b = np.atleast_2d(b)
    if a.dtype.char == 'U':
        a = a.T
    if b.dtype.name == 'object':
        a = a.flatten()
        b = b.flatten()
        for index, x in np.ndenumerate(a):
            assert_equal_from_matlab(a[index], b[index], options)
    elif b.dtype.names is not None or a.dtype.names is not None:
        assert a.dtype.names is not None
        assert b.dtype.names is not None
        assert set(a.dtype.names) == set(b.dtype.names)
        a = a.flatten()
        b = b.flatten()
        for k in b.dtype.names:
            for index, x in np.ndenumerate(a):
                assert_equal_from_matlab(a[k][index], b[k][index],
                                         options)
    else:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            npt.assert_equal(a, b)
Developer: sungjinlees, Project: hdf5storage, Lines: 35, Source: asserts.py

Example 13: backPropagation

 def backPropagation(self):
     print "start back propagation"
     accuracy_prev = 0.0
     for step in range(0, self.backPropN):
         print "------------------------------"
         for (i,img) in zip(self.training_label,self.training_data): 
             output_ref = np.zeros((1,self.output_num),dtype=np.double)
             output_ref[0][int(i)] = 1.0
             self.run(img)
             # output error
             output_error = (self.output_output - output_ref) * self.sigmoid_d(self.output_output)
             # middle_error
             middle_error = np.dot(output_error,self.w2) * self.sigmoid_d(self.middle_output)
             middle_error = np.resize(middle_error,(1,self.middle_num))
             # w2 update
             self.w2 -= self.nu * np.dot(output_error.T,np.atleast_2d(self.middle_output)) 
             # w1 update
             self.w1 -= self.nu * np.dot(middle_error.T,np.atleast_2d(self.input_output))
         self.identify()
         if (self.accuracy < accuracy_prev):
             print "Warning: Accuracy is Decreasing !!"
         accuracy_prev = self.accuracy
         print "BackPropagation Step " + str(step+1) + " finished"
         print "------------------------------"
     np.savetxt("w1.txt",self.w1)
     np.savetxt("w2.txt",self.w2)
     print "w1 and w2 saved and back propagation finished"
Developer: knorth55, Project: pythonNN-mnist, Lines: 27, Source: np-NN.py

Example 14: fit

    def fit(self, inputs, targets, learning_rate=0.2, epochs=10000):
        inputs = self.__add_bias(inputs, axis=1)
        targets = np.array(targets)

        for loop_cnt in xrange(epochs):
            # randomise the order of the inputs
            p = np.random.randint(inputs.shape[0])
            xp = inputs[p]
            bkp = targets[p]

            # forward phase
            gjp = self.__sigmoid(np.dot(self.v, xp))
            gjp = self.__add_bias(gjp)
            gkp = self.__sigmoid(np.dot(self.w, gjp))

            # backward phase(back prop)
            eps2 = self.__sigmoid_deriv(gkp) * (gkp - bkp)
            eps = self.__sigmoid_deriv(gjp) * np.dot(self.w.T, eps2)

            gjp = np.atleast_2d(gjp)
            eps2 = np.atleast_2d(eps2)
            self.w = self.w - learning_rate * np.dot(eps2.T, gjp)

            xp = np.atleast_2d(xp)
            eps = np.atleast_2d(eps)
            self.v = self.v - learning_rate * np.dot(eps.T, xp)[1:, :]
Developer: JackBass, Project: ml-algorithms-simple, Lines: 26, Source: MLP.py

Example 15: aim

    def aim(self, yo, yp=None, z=None, a=None, surface=None, filter=True):
        if z is None:
            z = self.pupil_distance
        yo = np.atleast_2d(yo)
        if yp is not None:
            if a is None:
                a = self.pupil_radius
                a = np.array(((-a, -a), (a, a)))
            a = np.arctan2(a, z)
            yp = np.atleast_2d(yp)
            yp = self.map_pupil(yp, a, filter)
            yp = z*np.tan(yp)
            yo, yp = np.broadcast_arrays(yo, yp)

        y = np.zeros((yo.shape[0], 3))
        y[..., :2] = -yo*self.radius
        if surface:
            y[..., 2] = -surface.surface_sag(y)
        uz = (0, 0, z)
        if self.telecentric:
            u = uz
        else:
            u = uz - y
        if yp is not None:
            s, m = sagittal_meridional(u, uz)
            u += yp[..., 0, None]*s + yp[..., 1, None]*m
        normalize(u)
        if z < 0:
            u *= -1
        return y, u
Developer: ki113r4bbi7, Project: rayopt, Lines: 30, Source: conjugates.py
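
A numpy-only sketch (with made-up coordinates) of the shape handling at the top of aim: atleast_2d lets a single field point and a batch of pupil points be broadcast against each other.

import numpy as np

yo = np.atleast_2d((0.0, 0.5))                   # one field point    -> shape (1, 2)
yp = np.atleast_2d([(0.0, 0.0),
                    (0.3, 0.3),
                    (-0.3, 0.3)])                # three pupil points -> shape (3, 2)
yo, yp = np.broadcast_arrays(yo, yp)             # both broadcast to shape (3, 2)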


Note: The numpy.atleast_2d examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or using the code, and do not reproduce this article without permission.