

Python numpy.asarray Function Code Examples

This article collects typical usage examples of the Python function numpy.asarray. If you are unsure what asarray does, how to call it, or what it looks like in real code, the curated examples below should help.


Fifteen code examples of the asarray function are shown below, drawn from open-source projects and sorted by popularity by default.
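
Before turning to the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the core behaviour of numpy.asarray: it converts Python sequences to ndarrays and, unlike numpy.array, avoids copying an input that is already an ndarray with a matching dtype.

import numpy as np

# Convert a plain Python list to an ndarray.
a = np.asarray([0, 2, -1, 1], dtype=np.int64)

# asarray returns the same object when the input is already
# an ndarray with a compatible dtype (no copy is made).
b = np.asarray(a)
print(b is a)            # True

# A dtype mismatch forces a converted copy.
c = np.asarray(a, dtype=np.float64)
print(c is a, c.dtype)   # False float64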

Example 1: _testDefaultBasic

  def _testDefaultBasic(self, dtype):
    indices = np.asarray([0, 2, -1, 1], dtype=dtype)
    depth = 3

    truth = np.asarray(
            [[1.0, 0.0, 0.0],
             [0.0, 0.0, 1.0],
             [0.0, 0.0, 0.0],
             [0.0, 1.0, 0.0]],
            dtype=dtype)

    # axis == -1
    self._testBothOneHot(
            indices=indices,
            depth=depth,
            dtype=dtype,
            truth=truth)

    # axis == 0
    self._testBothOneHot(
            indices=indices,
            depth=depth,
            axis=0,
            dtype=dtype,
            truth=truth.T)  # Output is the transposed version in this case
Developer: 0-T-0, Project: tensorflow, Lines: 25, Source file: one_hot_op_test.py
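
For reference, the truth matrix above one-hot encodes the indices [0, 2, -1, 1] with depth 3, mapping the negative index to an all-zero row. A plain-numpy sketch of the same construction (the one_hot helper below is a hypothetical illustration, not TensorFlow's API):

import numpy as np

def one_hot(indices, depth, dtype=np.float32):
    """Hypothetical helper: one-hot encode 1-D `indices`; negative indices give all-zero rows."""
    indices = np.asarray(indices)
    out = np.zeros((indices.size, depth), dtype=dtype)
    valid = indices >= 0
    out[np.arange(indices.size)[valid], indices[valid]] = 1
    return out

print(one_hot([0, 2, -1, 1], 3))
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 0. 0.]
#  [0. 1. 0.]]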

Example 2: time_column

def time_column(table, ifo=None):
    """Extract the 'time' column from the given table.

    This function uses time_func to determine the correct column to
    use as a proxy for 'time' and returns that column.
    The following mappings are used:
    - `sngl_inspiral` -> 'end' time
    - `sngl_burst` -> 'peak' time
    - `sngl_ringdown` -> 'start' time

    @param table
        any `LIGO_LW` table
    @param ifo
        an interferometer prefix if you want single-detector times

    @returns a numpy array object with a 'time' element for each row in
    the table
    """
    if hasattr(table, "get_time"):
        return numpy.asarray(table.get_time())
    func = time_func(ligolw_table.StripTableName(table.tableName))
    func_name = func.__name__
    if hasattr(table, func_name):
        return numpy.asarray(getattr(table, func_name)())
    else:
        # fall back to computing the time row by row
        return numpy.asarray([func(row) for row in table])
Developer: smirshekari, Project: lalsuite, Lines: 25, Source file: utils.py

Example 3: load_adm_sat_school_data

def load_adm_sat_school_data(return_X_y=False):

    with open("./merged_adm_sat_data.csv") as csv_file:
        data_file = csv.reader(csv_file)
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        target_names = np.array(temp[2:])


    df = pd.read_csv("./merged_adm_sat_data.csv", sep=",", usecols=(0, 1, 2, 3), skiprows=0)
    data = np.empty((n_samples, n_features), dtype=int)
    target = np.ma.empty((n_samples,), dtype=int)

    for index, row in df.iterrows():
        data[index] = np.asarray([df.iloc[index][0], df.iloc[index][1], df.iloc[index][2]], dtype=float)
        target[index] = np.asarray(df.iloc[index][3], dtype=int)

    feature_names = np.array(['ACT_AVG','SAT_AVG','GRAD_DEBT','REGION'])

    if return_X_y:
        return data, target

    return datasets.base.Bunch(data=data, target=target,
                 target_names=target_names,
                 DESCR='School Data set',
                 feature_names=feature_names)
Developer: DistrictDataLabs, Project: yellowbrick, Lines: 27, Source file: testing.py

Example 4: test_layer_div

  def test_layer_div(self):
      # Ensure layer division gives the correct output
      layer_o = self.layer6 / self.layer7
      array1 = np.asarray(layer_o.get_nparray())
      res = np.asarray([[0.2]*3]*3)
      self.assertEqual(np.all(array1 == 0.2), True)
      self.assertTrue(allequal(layer_o._data, res))
Developer: JayakrishnanAjayakumar, Project: pcml, Lines: 7, Source file: test_layer.py

Example 5: test_layer_sub

  def test_layer_sub(self):
      # Ensure layer subtraction gives the correct output
      layer_o = self.layer6 - self.layer7
      array1 = np.asarray(layer_o.get_nparray())
      res = np.asarray([[-4]*3]*3)
      self.assertEqual(np.all(array1 == -4), True)
      self.assertTrue(allequal(layer_o._data, res))
Developer: JayakrishnanAjayakumar, Project: pcml, Lines: 7, Source file: test_layer.py

Example 6: save_nodes_to_store

  def save_nodes_to_store(self, store, queue):
    for node_id, node in self.nodes.items():
      features = {}
      features['neighbors'] = node['neighbors']
      if 'soft_label' in self.nodes_features:
        features['soft_label'] = node['soft_label']
      if 'size' in self.nodes_features:
        features['size'] = len(node['pos'])
      if 'pos' in self.nodes_features:
        features['pos'] = np.asarray(node['pos'])
        if features['pos'].shape == (0,):
          features['pos'] = np.zeros(shape=(0,3))
      if 'mesh' in self.nodes_features:
        # Because self.ml includes the overlap, it is possible
        # that a node has a mesh in the overlap
        # but not a single voxel in the non-overlap region
        vertices, triangles = mesh.marche_cubes( node_id , self.ml )
        vertices += np.asarray(self.start).astype(np.uint16) * 2 #translate mesh
        features['mesh'] = mesh.get_adjacent( vertices, triangles )
      if 'semantic_sum' in self.nodes_features:
        features['semantic_sum'] = node['semantic_sum']

      features['tree'] = Tree(node_id)
      existent_node_features = store.get_node(node_id)
      if existent_node_features:
        features = self.sum_nodes_features(existent_node_features, features )
      store.put_node(node_id, features)
Developer: tartavull, Project: tigertrace, Lines: 27, Source file: construct.py

Example 7: encode_doc

def encode_doc(doc, max_len):
    if doc is None:
        return np.asarray([])

    # enc = np.asarray([max(min(ord(c), max_char-1), 0) for c in doc[:max_len]])
    enc = np.asarray([vocab.token2id.get(c, default_id) for c in itertools.islice(gensim.utils.tokenize(doc, to_lower=True), max_len)])
    return enc
Developer: GALI472, Project: ir-2016, Lines: 7, Source file: lstm_for_nonfactoid_qa.py

Example 8: __init__

  def __init__(self, qdir='GM', verbosity=1, filepattern=None):
    """
      qdir      ... (opt) 'GM' or 'GK' for symmetry direction of q
      verbosity ... (opt) 0 silent, 1 minimal output, 3 debug, >3 debug interpolation
      filepattern.. (opt) read eps2D from files matching filepattern
                          qdir has no effect in this case
    """
    self.qdir      = qdir;
    self.verbosity = verbosity;

    # read DP-output files containing EPS2D (sorted by momentum transfer)
    if filepattern is None:
      self.path   = os.path.dirname(os.path.abspath(__file__))+'/data/'+qdir;
      filepattern = self.path+'/CUTOFF_R12.6_grapheneAA-2d0-HIGH-RPA*-high-%s-q*_outlf.eps'%(self.qdir);
    self.spectra= dp_mdf.GetMDF(filepattern);
    self.spectra.sort(key=lambda mdf: np.linalg.norm(mdf.get_q('cc','au')));

    # check that we have eps2D
    assert len(self.spectra)>0
    for mdf in self.spectra:
      assert mdf.param['quantity']=='mdf';
      assert 'eps2D' in ''.join(mdf.param['comment']);

    # extract data
    self.eps2D  = np.asarray([ mdf.eps for mdf in self.spectra ]);
    q           = [ np.linalg.norm(mdf.get_q('cc','au')) for mdf in self.spectra ];
    self.q      = np.asarray(q, dtype=float);  # in 1/bohr
    self.E      = self.spectra[0].get_E();     # in eV
    self.calc_param = deepcopy(self.spectra[0].param);
    self.set_qprecision();
Developer: rhambach, Project: EELcalc, Lines: 30, Source file: rpa.py

Example 9: set_data

    def set_data(self, x, y, A):
        A = cbook.safe_masked_invalid(A)
        if x is None:
            x = np.arange(0, A.shape[1]+1, dtype=np.float64)
        else:
            x = np.asarray(x, np.float64).ravel()
        if y is None:
            y = np.arange(0, A.shape[0]+1, dtype=np.float64)
        else:
            y = np.asarray(y, np.float64).ravel()

        if A.shape[:2] != (y.size-1, x.size-1):
            raise ValueError(
                "Axes don't match array shape: A.shape=%s, y.size=%s, x.size=%s"
                % (A.shape, y.size, x.size))
        if A.ndim not in [2, 3]:
            raise ValueError("A must be 2D or 3D")
        if A.ndim == 3 and A.shape[2] == 1:
            A.shape = A.shape[:2]
        self.is_grayscale = False
        if A.ndim == 3:
            if A.shape[2] in [3, 4]:
                if (A[:,:,0] == A[:,:,1]).all() and (A[:,:,0] == A[:,:,2]).all():
                    self.is_grayscale = True
            else:
                raise ValueError("3D arrays must have RGB or RGBA as last dim")
        self._A = A
        self._Ax = x
        self._Ay = y
        self._rgbacache = None
Developer: CTPUG, Project: matplotlib, Lines: 31, Source file: image.py

Example 10: test_sym_matrix_to_vec

def test_sym_matrix_to_vec():
    sym = np.ones((3, 3))
    sqrt2 = 1. / sqrt(2.)
    vec = np.array([sqrt2, 1., sqrt2, 1., 1., sqrt2])
    assert_array_almost_equal(sym_matrix_to_vec(sym), vec)

    vec = np.array([1., 1., 1.])
    assert_array_almost_equal(sym_matrix_to_vec(sym, discard_diagonal=True),
                              vec)

    # Check sym_matrix_to_vec is the inverse function of vec_to_sym_matrix
    n = 5
    p = n * (n + 1) // 2
    rand_gen = np.random.RandomState(0)
    # when diagonal is included
    vec = rand_gen.rand(p)
    sym = vec_to_sym_matrix(vec)
    assert_array_almost_equal(sym_matrix_to_vec(sym), vec)

    # when diagonal given separately
    diagonal = rand_gen.rand(n + 1)
    sym = vec_to_sym_matrix(vec, diagonal=diagonal)
    assert_array_almost_equal(sym_matrix_to_vec(sym, discard_diagonal=True),
                              vec)

    # multiple matrices case when diagonal is included
    vecs = np.asarray([vec, 2. * vec, 0.5 * vec])
    syms = vec_to_sym_matrix(vecs)
    assert_array_almost_equal(sym_matrix_to_vec(syms), vecs)

    # multiple matrices case when diagonal is given separately
    diagonals = np.asarray([diagonal, 3. * diagonal, -diagonal])
    syms = vec_to_sym_matrix(vecs, diagonal=diagonals)
    assert_array_almost_equal(sym_matrix_to_vec(syms, discard_diagonal=True),
                              vecs)
Developer: bthirion, Project: nilearn, Lines: 35, Source file: test_connectivity_matrices.py
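
For reference, the expected vectors in this test follow a simple convention: the lower triangle of the symmetric matrix is flattened row by row, the diagonal entries are divided by sqrt(2), and discard_diagonal=True drops the diagonal entirely. A plain-numpy sketch that reproduces the expected values above (a simplified stand-in for illustration, not nilearn's implementation):

import numpy as np

def sym_to_vec_sketch(sym, discard_diagonal=False):
    # Flatten the lower triangle row by row; diagonal entries are divided
    # by sqrt(2), matching the expected vectors in the test above.
    sym = np.asarray(sym, dtype=float)
    k = -1 if discard_diagonal else 0
    mask = np.tril(np.ones(sym.shape, dtype=bool), k=k)
    scaling = np.ones(sym.shape)
    np.fill_diagonal(scaling, np.sqrt(2.0))
    return (sym / scaling)[mask]

print(sym_to_vec_sketch(np.ones((3, 3))))
# [0.70710678 1.         0.70710678 1.         1.         0.70710678]
print(sym_to_vec_sketch(np.ones((3, 3)), discard_diagonal=True))
# [1. 1. 1.]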

Example 11: segment_haar

def segment_haar(cnarr):
    """Do segmentation for CNVkit.

    Calculate copy number segmentation by HaarSeg
    (http://haarseg.r-forge.r-project.org/)
    Input: log2 coverage data in Nexus 'basic' format
    Output: the CBS data table

    """
    chrom_tables = []
    # Segment each chromosome individually
    # ENH - skip large gaps (segment chrom. arms separately)
    for chrom, subprobes in cnarr.by_chromosome():
        # echo(chrom, ':')  # DBG
        segtable = haarSeg(subprobes['log2'])
        chromtable = pd.DataFrame({
            'chromosome': chrom,
            'start': np.asarray(subprobes['start']).take(segtable['start']),
            'end': np.asarray(subprobes['end']
                             ).take(segtable['start']+segtable['size']-1),
            'gene': '.',
            'log2': segtable['log2'],
            'probes': segtable['size'],
        })
        # echo(chromtable)  # DBG
        chrom_tables.append(chromtable)
    result = pd.concat(chrom_tables)
    echo("haar: Found", len(result), "segments")
    segarr = cnarr.as_dataframe(result)
    segarr.sort_columns()
    return segarr
Developer: zengfengbo, Project: cnvkit, Lines: 31, Source file: haar.py
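
The 'start' and 'end' columns above are produced by converting the per-probe coordinate columns with np.asarray and indexing them with take; a toy illustration with synthetic coordinates (the values below are made up for demonstration):

import numpy as np

# Toy per-probe coordinates and a fake segmentation result: two segments
# starting at probe indices [0, 3] and spanning [3, 2] probes.
probe_start = np.asarray([100, 200, 300, 400, 500])
probe_end = np.asarray([199, 299, 399, 499, 599])
seg_start_idx = np.asarray([0, 3])
seg_size = np.asarray([3, 2])

seg_start = probe_start.take(seg_start_idx)             # [100 400]
seg_end = probe_end.take(seg_start_idx + seg_size - 1)  # [399 599]
print(seg_start, seg_end)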

Example 12: _testTensorArrayWriteConcat

  def _testTensorArrayWriteConcat(self, tf_dtype):
    dtype = tf_dtype.as_numpy_dtype()
    with self.test_session(use_gpu=self._use_gpu):
      ta = tensor_array_ops.TensorArray(
          dtype=tf_dtype, tensor_array_name="foo", size=3, infer_shape=False)

      if tf_dtype == tf.string:
        # In Python3, np.str is unicode, while we always want bytes
        convert = lambda x: np.asarray(x).astype("|S")
      else:
        convert = lambda x: np.asarray(x).astype(dtype)

      w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0], [204.0, 205.0]]))
      w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
      w2 = w1.write(2, convert([[8.0, 9.0]]))

      c0 = w2.concat()

      self.assertAllEqual(
          convert([[4.0, 5.0],
                   [104.0, 105.0],
                   [204.0, 205.0],
                   [6.0, 7.0],
                   [106.0, 107.0],
                   [8.0, 9.0]]), c0.eval())
Developer: bsantanas, Project: tensorflow, Lines: 25, Source file: tensor_array_ops_test.py
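
As the convert lambda above notes, string arrays in Python 3 default to unicode, so the test casts through astype("|S") to obtain byte strings for tf.string comparisons. A minimal numpy-only illustration of the difference (independent of TensorFlow):

import numpy as np

values = [[4.0, 5.0], [104.0, 105.0]]

as_unicode = np.asarray(values).astype(str)   # dtype kind 'U' (unicode)
as_bytes = np.asarray(values).astype("|S")    # dtype kind 'S' (bytes)

print(as_unicode.dtype.kind, as_bytes.dtype.kind)  # U S
print(as_bytes[0, 0])                              # b'4.0'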

Example 13: access_Measurement

def access_Measurement(lat, long, year):
    path_ccsm4 = '/Users/DavidKMYang/ClimateResearch/WBGT/ccsm4_tasmax_nepal/'

    os.chdir(path_ccsm4)
    file_names_ccsm4 = glob.glob("tasmax_" + str(year)+"*.mat")

    for i in range(len(file_names_ccsm4)):
        lat_index = 0
        long_index = 0
        print (file_names_ccsm4[i])
        tempData = scipy.io.loadmat(path_ccsm4 + file_names_ccsm4[i])
        tempData = tempData[file_names_ccsm4[i][:-4]][0]

        tempLatList = []
        for k in range(len(tempData[0])):
            tempLatList.append(tempData[0][k][0])
        tempLatList = np.asarray(tempLatList)
        lat_index = find_nearest(tempLatList, lat)

        tempLongList = tempData[1][0]
        tempLongList = np.asarray(tempLongList)

        long_index = find_nearest(tempLongList, long)
        print (tempLatList[lat_index])
        print (tempLongList[long_index])
        print (tempData[2][lat_index][long_index])


# Example invocation
access_Measurement(25, 30, 2001)
Developer: dkmy, Project: Climate_Research_Project, Lines: 29, Source file: MAT_Search.py

Example 14: shared

 def shared(data):
     """ Place the data into shared variables. This allows Theano to copy
     the data to the GPU, if one is available.
     """
     shared_x = theano.shared(numpy.asarray(data[:,0].tolist(), dtype=theano.config.floatX), borrow=True)
     shared_y = theano.shared(numpy.asarray(data[:,1].tolist(), dtype=theano.config.floatX), borrow=True)
     return shared_x, T.cast(shared_y, "int32")
Developer: Nueard, Project: Genetic-algorithms-neural-networks, Lines: 7, Source file: DataLoader.py

Example 15: test_cross_validator_with_default_indices

def test_cross_validator_with_default_indices():
    n_samples = 4
    n_unique_labels = 4
    n_folds = 2
    p = 2
    n_iter = 10  # (the default value)

    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    X_1d = np.array([1, 2, 3, 4])
    y = np.array([1, 1, 2, 2])
    labels = np.array([1, 2, 3, 4])
    loo = LeaveOneOut()
    lpo = LeavePOut(p)
    kf = KFold(n_folds)
    skf = StratifiedKFold(n_folds)
    lolo = LeaveOneLabelOut()
    lopo = LeavePLabelOut(p)
    ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = number of unique folds = 2

    n_splits = [n_samples, comb(n_samples, p), n_folds, n_folds,
                n_unique_labels, comb(n_unique_labels, p), n_iter, 2]

    for i, cv in enumerate([loo, lpo, kf, skf, lolo, lopo, ss, ps]):
        # Test if get_n_splits works correctly
        assert_equal(n_splits[i], cv.get_n_splits(X, y, labels))

        # Test if the cross-validator works as expected even if
        # the data is 1d
        np.testing.assert_equal(list(cv.split(X, y, labels)),
                                list(cv.split(X_1d, y, labels)))
        # Test that train, test indices returned are integers
        for train, test in cv.split(X, y, labels):
            assert_equal(np.asarray(train).dtype.kind, 'i')
            assert_equal(np.asarray(test).dtype.kind, 'i')
Developer: absolutelyNoWarranty, Project: scikit-learn, Lines: 35, Source file: test_split.py


Note: The numpy.asarray examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce this article without permission.