

Python numpy.concatenate Code Examples

This article collects typical usage examples of Python's numpy.concatenate method, drawn from real open-source projects. If you are unsure what numpy.concatenate does, how to call it, or simply want to see it used in context, the curated examples below should help. You can also browse the other numpy method examples on this site.


The following 15 code examples of numpy.concatenate are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Python code examples.
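
Before the examples, a minimal sketch of the method itself: np.concatenate joins a sequence of arrays along an existing axis, and the arrays must agree in size on every other axis.

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])

np.concatenate((a, b), axis=0)    # join rows: shape (3, 2)
np.concatenate((a, b.T), axis=1)  # join columns: shape (2, 3)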

Example 1: convert

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def convert(story):
    # import pdb; pdb.set_trace()
    sentence_arr, graphs, query_arr, answer_arr = story
    node_id_w = graphs[2].shape[2]
    edge_type_w = graphs[3].shape[3]

    all_node_strengths = [np.zeros([1])]
    all_node_ids = [np.zeros([1,node_id_w])]
    for num_new_nodes, new_node_strengths, new_node_ids, _ in zip(*graphs):
        last_strengths = all_node_strengths[-1]
        last_ids = all_node_ids[-1]

        cur_strengths = np.concatenate([last_strengths, new_node_strengths], 0)
        cur_ids = np.concatenate([last_ids, new_node_ids], 0)

        all_node_strengths.append(cur_strengths)
        all_node_ids.append(cur_ids)

    all_edges = graphs[3]
    full_n_nodes = all_edges.shape[1]
    all_node_strengths = np.stack([np.pad(x, ((0, full_n_nodes-x.shape[0])), 'constant') for x in all_node_strengths[1:]])
    all_node_ids = np.stack([np.pad(x, ((0, full_n_nodes-x.shape[0]), (0, 0)), 'constant') for x in all_node_ids[1:]])
    all_node_states = np.zeros([len(all_node_strengths), full_n_nodes,0])

    return tuple(x[np.newaxis,...] for x in (all_node_strengths, all_node_ids, all_node_states, all_edges)) 
Author: hexahedria, Project: gated-graph-transformer-network, Lines: 27, Source: convert_story.py
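
The core idiom of this example - growing per-step arrays with np.concatenate, then aligning them to a common length with np.pad before np.stack - can be isolated as a minimal sketch (shapes are made up):

import numpy as np

steps = [np.ones(1), np.ones(2), np.ones(3)]   # ragged per-step arrays
full = max(s.shape[0] for s in steps)
aligned = np.stack([np.pad(s, (0, full - s.shape[0]), 'constant') for s in steps])
print(aligned.shape)                           # (3, 3)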

Example 2: load_encodings

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def load_encodings():
    """
    加载保存的历史人脸向量,以及name向量,并返回
    :return:
    """
    known_face_encodings = np.load(KNOWN_FACE_ENCODINGS)
    known_face_names = np.load(KNOWN_FACE_NANE)
    if not os.path.exists(KNOWN_FACE_NANE) or not os.path.exists(KNOWN_FACE_ENCODINGS):
        encoding_images(data_path)
    npy_files = [file for file in os.listdir(data_path)
                 if os.path.isfile(os.path.join(data_path, file)) and file.endswith("npy")]
    for data in npy_files:
        if data.startswith('known_face_encodings_'):
            tmp_face_encodings = np.load(os.path.join(data_path,data))
            known_face_encodings = np.concatenate((known_face_encodings, tmp_face_encodings), axis=0)
            print("load ", data)
        elif data.startswith('known_face_name_'):
            tmp_face_name = np.load(os.path.join(data_path, data))
            known_face_names = np.concatenate((known_face_names, tmp_face_name), axis=0)
            print("load ", data)
        else:
            print('skip to load original ', data)
    return known_face_encodings, known_face_names
Author: matiji66, Project: face-attendance-machine, Lines: 25, Source: encoding_images.py
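
The merge idiom above boils down to concatenating row blocks loaded from successive .npy files along axis 0. A self-contained sketch with hypothetical file names:

import numpy as np, os, tempfile

d = tempfile.mkdtemp()
np.save(os.path.join(d, 'known_face_encodings_a.npy'), np.zeros((2, 128)))
np.save(os.path.join(d, 'known_face_encodings_b.npy'), np.ones((3, 128)))

parts = [np.load(os.path.join(d, f)) for f in sorted(os.listdir(d))
         if f.startswith('known_face_encodings_')]
print(np.concatenate(parts, axis=0).shape)   # (5, 128)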

Example 3: __iter__

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def __iter__(self):
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            np.random.shuffle(indice)
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(indice)
            indice = np.concatenate(
                [indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        indices = [
            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu))
        ]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert len(indices) == self.num_samples
        return iter(indices) 
Author: open-mmlab, Project: mmdetection, Lines: 25, Source: group_sampler.py
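
The sampler pads each group to a multiple of samples_per_gpu before chunking; that padding step is the first np.concatenate above. A minimal sketch of just that step:

import numpy as np

indice = np.arange(10)
samples_per_gpu = 4
num_extra = int(np.ceil(len(indice) / samples_per_gpu)) * samples_per_gpu - len(indice)
indice = np.concatenate([indice, np.random.choice(indice, num_extra)])
print(len(indice) % samples_per_gpu)   # 0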

Example 4: train_lr_rfeinman

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def train_lr_rfeinman(densities_pos, densities_neg, uncerts_pos, uncerts_neg):
    """
    TODO
    :param densities_pos:
    :param densities_neg:
    :param uncerts_pos:
    :param uncerts_neg:
    :return:
    """
    values_neg = np.concatenate(
        (densities_neg.reshape((1, -1)),
         uncerts_neg.reshape((1, -1))),
        axis=0).transpose([1, 0])
    values_pos = np.concatenate(
        (densities_pos.reshape((1, -1)),
         uncerts_pos.reshape((1, -1))),
        axis=0).transpose([1, 0])

    values = np.concatenate((values_neg, values_pos))
    labels = np.concatenate(
        (np.zeros_like(densities_neg), np.ones_like(densities_pos)))

    lr = LogisticRegressionCV(n_jobs=-1).fit(values, labels)

    return values, labels, lr 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 27, Source: util.py
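
The reshape/concatenate/transpose dance above just builds an (N, 2) feature matrix: one row per sample, with density and uncertainty as the two columns. In isolation:

import numpy as np

densities = np.array([0.1, 0.9, 0.5])
uncerts = np.array([0.3, 0.7, 0.2])
features = np.concatenate((densities.reshape((1, -1)),
                           uncerts.reshape((1, -1))), axis=0).transpose([1, 0])
print(features.shape)   # (3, 2); equivalent to np.stack((densities, uncerts), axis=1)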

Example 5: compute_roc_rfeinman

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    TODO
    :param probs_neg:
    :param probs_pos:
    :param plot:
    :return:
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 25, Source: util.py
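
The label construction above is the standard way to score a detector with scikit-learn: concatenate negative and positive scores, pair them with 0/1 labels of matching shapes, and pass both to roc_curve. A toy run (assuming scikit-learn is installed):

import numpy as np
from sklearn.metrics import roc_curve, auc

probs = np.concatenate((np.array([0.2, 0.4]), np.array([0.7, 0.9])))
labels = np.concatenate((np.zeros(2), np.ones(2)))
fpr, tpr, _ = roc_curve(labels, probs)
print(auc(fpr, tpr))   # 1.0 - this toy data is perfectly separable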

Example 6: block_split

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def block_split(X, Y):
    """
    Split the data into 80% for training and 20% for testing
    in a block size of 100.
    :param X: 
    :param Y: 
    :return: 
    """
    print("Isolated split 80%, 20% for training and testing")
    num_samples = X.shape[0]
    partition = int(num_samples/3)
    X_adv, Y_adv = X[:partition], Y[:partition]
    X_norm, Y_norm = X[partition:2*partition], Y[partition:2*partition]
    X_noisy, Y_noisy = X[2*partition:], Y[2*partition:]

    num_train = int(partition * 0.008) * 100  # 80% of each third, rounded down to blocks of 100
    X_train = np.concatenate((X_adv[:num_train], X_norm[:num_train], X_noisy[:num_train]))
    Y_train = np.concatenate((Y_adv[:num_train], Y_norm[:num_train], Y_noisy[:num_train]))

    X_test = np.concatenate((X_adv[num_train:], X_norm[num_train:], X_noisy[num_train:]))
    Y_test = np.concatenate((Y_adv[num_train:], Y_norm[num_train:], Y_noisy[num_train:]))

    return X_train, Y_train, X_test, Y_test 
Author: StephanZheng, Project: neural-fingerprinting, Lines: 25, Source: util.py
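
To see the block layout, here is the same split on toy data where each third holds 10 samples and num_train is shrunk to 8 (the original works in blocks of 100):

import numpy as np

X = np.arange(30)
partition = X.shape[0] // 3
X_adv, X_norm, X_noisy = X[:partition], X[partition:2*partition], X[2*partition:]

num_train = 8   # stands in for int(partition * 0.008) * 100
X_train = np.concatenate((X_adv[:num_train], X_norm[:num_train], X_noisy[:num_train]))
X_test = np.concatenate((X_adv[num_train:], X_norm[num_train:], X_noisy[num_train:]))
print(X_train.shape, X_test.shape)   # (24,) (6,)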

Example 7: _prepro_cpg

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def _prepro_cpg(self, states, dists):
        """Preprocess the state and distance of neighboring CpG sites."""
        prepro_states = []
        prepro_dists = []
        for state, dist in zip(states, dists):
            nan = state == dat.CPG_NAN
            if np.any(nan):
                state[nan] = np.random.binomial(1, state[~nan].mean(),
                                                nan.sum())
                dist[nan] = self.cpg_max_dist
            dist = np.minimum(dist, self.cpg_max_dist) / self.cpg_max_dist
            prepro_states.append(np.expand_dims(state, 1))
            prepro_dists.append(np.expand_dims(dist, 1))
        prepro_states = np.concatenate(prepro_states, axis=1)
        prepro_dists = np.concatenate(prepro_dists, axis=1)
        if self.cpg_wlen:
            center = prepro_states.shape[2] // 2
            delta = self.cpg_wlen // 2
            tmp = slice(center - delta, center + delta)
            prepro_states = prepro_states[:, :, tmp]
            prepro_dists = prepro_dists[:, :, tmp]
        return (prepro_states, prepro_dists) 
Author: kipoi, Project: models, Lines: 24, Source: dataloader_m.py
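
The two np.concatenate calls above assemble per-sample column vectors into matrices; np.expand_dims(state, 1) gives each vector a trailing axis to join on. Reduced to essentials:

import numpy as np

cols = [np.array([1., 2., 3.]), np.array([4., 5., 6.])]
mat = np.concatenate([np.expand_dims(c, 1) for c in cols], axis=1)
print(mat.shape)   # (3, 2) - same result as np.stack(cols, axis=1)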

Example 8: forward

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def forward(self, input):
        # Requires: import torch; from torch.autograd import Variable
        # input has shape (N, 4, 1, 1000)
        # return the sequence concatenated with its reverse complement
        # build inverted indices along the channel and position axes
        invert_dims = [1, 3]
        input_bkup = input
        for idim in invert_dims:
            idxs = [i for i in range(input.size(idim)-1, -1, -1)]
            idxs_var = Variable(torch.LongTensor(idxs))
            if input.is_cuda:
                idxs_var =idxs_var.cuda()
            input = input.index_select(idim, idxs_var)
        #
        input = torch.cat([input_bkup, input], dim=0)
        #
        # Using numpy:
        #input = edit_tensor_in_numpy(input, lambda x: np.concatenate([x, x[:,::-1, : ,::-1]],axis=0))
        return input 
Author: kipoi, Project: models, Lines: 20, Source: model_architecture.py
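
The commented-out numpy variant at the end hints at the equivalent array operation: flip the one-hot sequence along the channel and position axes, then append it along the batch axis. A numpy-only sketch with made-up shapes:

import numpy as np

x = np.random.rand(2, 4, 1, 10)                        # (N, 4, 1, length)
both = np.concatenate([x, x[:, ::-1, :, ::-1]], axis=0)
print(both.shape)                                      # (4, 4, 1, 10)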

Example 9: predict_on_batch

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def predict_on_batch(self, inputs):
            if inputs.shape == (2,):
                inputs = inputs[np.newaxis, :]
            # Encode
            max_len = len(max(inputs, key=len))
            one_hot_ref =  self.encode(inputs[:,0])
            one_hot_alt = self.encode(inputs[:,1])
            # Construct dummy library indicator
            indicator = np.zeros((inputs.shape[0],2))
            indicator[:,1] = 1
            # Compute fold change for all three frames
            fc_changes = []
            for shift in range(3):
                if shift > 0:
                    shifter = np.zeros((one_hot_ref.shape[0],1,4))
                    one_hot_ref = np.concatenate([one_hot_ref, shifter], axis=1)
                    one_hot_alt = np.concatenate([one_hot_alt, shifter], axis=1)
                pred_ref = self.model.predict_on_batch([one_hot_ref, indicator]).reshape(-1)
                pred_variant = self.model.predict_on_batch([one_hot_alt, indicator]).reshape(-1)
                fc_changes.append(np.log2(pred_variant/pred_ref))
            # Return
            return {"mrl_fold_change":fc_changes[0], 
                    "shift_1":fc_changes[1],
                    "shift_2":fc_changes[2]} 
Author: kipoi, Project: models, Lines: 26, Source: model.py
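
The frame-shift loop grows each one-hot sequence by one all-zero position per iteration; that is the only np.concatenate use here. In isolation:

import numpy as np

one_hot = np.random.rand(2, 10, 4)                 # batch of one-hot sequences
shifter = np.zeros((one_hot.shape[0], 1, 4))
one_hot = np.concatenate([one_hot, shifter], axis=1)
print(one_hot.shape)                               # (2, 11, 4)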

Example 10: auto_inverse

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def auto_inverse(self, whole_spectrum):
        # Requires: import tqdm
        whole_spectrum = np.copy(whole_spectrum).astype(complex)
        whole_spectrum[whole_spectrum < 1] = 1
        overwrap = self.buffer_size * 2
        height = whole_spectrum.shape[0]
        parallel_dif = (height-overwrap) // self.parallel
        if height < self.parallel*overwrap:
            raise Exception('voice length is too small to use gpu, or parallel number is too big')

        spec = [self.inverse(whole_spectrum[range(i, i+parallel_dif*self.parallel, parallel_dif), :]) for i in tqdm.tqdm(range(parallel_dif+overwrap))]
        spec = spec[overwrap:]
        spec = np.concatenate(spec, axis=1)
        spec = spec.reshape(-1, self.wave_len)

        # The code below does not yet account for wave_len and wave_dif; to be fixed.
        wave = np.fft.ifft(spec, axis=1).real
        pad = np.zeros((wave.shape[0], 2), dtype=float)
        wave = np.concatenate([wave, pad], axis=1)

        dst = np.zeros((wave.shape[0]+3)*self.wave_dif, dtype=float)
        for i in range(4):
            w = wave[range(i, wave.shape[0], 4),:]
            w = w.reshape(-1)
            dst[i*self.wave_dif:i*self.wave_dif+len(w)] += w
        return dst*0.5 
Author: pstuvwx, Project: Deep_VoiceChanger, Lines: 27, Source: gla_gpu.py

Example 11: wavefile_to_waveform

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def wavefile_to_waveform(wav_file, features_type):
    data, sr = sf.read(wav_file)
    if features_type == 'vggish':
        tmp_name = str(int(np.random.rand(1)*1000000)) + '.wav'
        sf.write(tmp_name, data, sr, subtype='PCM_16')
        sr, wav_data = wavfile.read(tmp_name)
        os.remove(tmp_name)
        # sr, wav_data = wavfile.read(wav_file) # as done in VGGish Audioset
        assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
        data = wav_data / 32768.0  # Convert to [-1.0, +1.0]
  
    # ensure at least one second of samples; if shorter, repeat-pad
    src_repeat = data
    while (src_repeat.shape[0] < sr): 
        src_repeat = np.concatenate((src_repeat, data), axis=0)
        data = src_repeat[:sr]

    return data, sr 
Author: jordipons, Project: sklearn-audio-transfer-learning, Lines: 20, Source: utils.py
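
The repeat-pad idiom at the end guarantees at least one second of audio. Stripped of the I/O, it looks like this (sr and lengths are made up):

import numpy as np

sr = 16000
data = np.random.rand(6000)                # shorter than one second
src_repeat = data
while src_repeat.shape[0] < sr:
    src_repeat = np.concatenate((src_repeat, data), axis=0)
    data = src_repeat[:sr]
print(data.shape)                          # (16000,)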

Example 12: predict

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def predict(self, Z):
        """
        Make predictions on new dataset.

        Parameters
        ----------
        Z : array
            new data set (M samples by D features)

        Returns
        -------
        preds : array
            label predictions (M samples by 1)

        """
        # Data shape
        M, D = Z.shape

        # If classifier is trained, check for same dimensionality
        if self.is_trained:
            if not self.train_data_dim == D:
                raise ValueError('''Test data is of different dimensionality
                                 than training data.''')

        # Check for augmentation
        if not self.train_data_dim == D:
            Z = np.concatenate((np.dot(Z, self.C), Z), axis=1)

        # Call scikit's predict function
        preds = self.clf.predict(Z)

        # For quadratic loss function, correct predictions
        if self.loss == 'quadratic':
            preds = (np.sign(preds)+1)/2.

        # Return predictions array
        return preds 
Author: wmkouw, Project: libTLDA, Lines: 39, Source: scl.py
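
The augmentation step concatenates the projected features np.dot(Z, C) in front of the original features along axis 1. A minimal sketch with a made-up projection matrix C:

import numpy as np

Z = np.random.rand(5, 3)          # 5 samples, 3 original features
C = np.random.rand(3, 2)          # hypothetical learned projection
Z_aug = np.concatenate((np.dot(Z, C), Z), axis=1)
print(Z_aug.shape)                # (5, 5): projected features prepended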

Example 13: safe_nlp_vector

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def safe_nlp_vector(self, words):
        """
        Parameters
            ----------
            words : list of str/str 
                wordbag
        Returns
            ----------
            ndarray(float)
                the corresponding vectors of words in wordbag.
                a vector contains the similarities calculated by word2vec and wordnet.
        """
        if isinstance(words, string_types):
            synonym=self.synonym_label(words)
            similarity=self.similarity_label(words)
        else:
            synonym=np.empty((len(self.Label_index),len(words)))
            similarity=np.empty((len(self.Label_index),len(words)))
            for i in range(len(words)):
                try:
                    synonym[:,i]=self.synonym_label(words[i])
                except Exception:
                    synonym[:,i]=np.zeros((len(self.Label_index),1))[:,0]
                try:    
                    similarity[:,i]=self.similarity_label(words[i])[:,0]
                except Exception:
                    similarity[:,i]=np.zeros((len(self.Label_index),1))[:,0]
        vector=np.concatenate((similarity, synonym))
        return vector 
Author: Coldog2333, Project: Financial-NLP, Lines: 31, Source: NLP.py
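
With no axis argument, np.concatenate joins along axis 0, so the similarity and synonym matrices are stacked row-wise into one feature matrix. Reduced to shapes only:

import numpy as np

similarity = np.random.rand(4, 7)   # one row per label, one column per word
synonym = np.random.rand(4, 7)
vector = np.concatenate((similarity, synonym))   # default axis=0
print(vector.shape)                              # (8, 7)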

Example 14: nlp_vector

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def nlp_vector(self, words):
        if isinstance(words, string_types):
            synonym=self.synonym_label(words)
            similarity=self.similarity_label(words)
        else:
            synonym=self.synonym_label(words)
            similarity=np.empty((len(self.Label_index),len(words)))
            for i in range(len(words)):
                try:
                    similarity[:,i]=self.similarity_label(words[i])[:,0]
                except Exception:
                    similarity[:,i]=np.zeros((len(self.Label_index),1))[:,0]
        vector=np.concatenate((similarity, synonym))
        return vector 
Author: Coldog2333, Project: Financial-NLP, Lines: 16, Source: NLP.py

Example 15: _compute_neighborhood_graph_weight

# Required module: import numpy [as alias]
# Or: from numpy import concatenate [as alias]
def _compute_neighborhood_graph_weight(self, root, graph):
        # Requires: from scipy import stats
        # list all nodes at increasing distances
        # at each distance
        # compute the arithmetic mean weight on nodes
        # compute the geometric mean weight on edges
        # compute the product of the two
        # make a list of the neighborhood_graph_weight at every distance
        neighborhood_graph_weight_list = []
        w = graph.nodes[root][self.key_weight]
        node_weight_list = np.array([w], dtype=np.float64)
        node_average = node_weight_list[0]
        edge_weight_list = np.array([1], dtype=np.float64)
        edge_average = edge_weight_list[0]
        # for all distances
        root_dist_dict = graph.nodes[root]['remote_neighbours']
        for dist in root_dist_dict.keys():
            # extract array of weights at given dist
            weight_array_at_d = np.array([graph.nodes[v][self.key_weight]
                                          for v in root_dist_dict[dist]],
                                         dtype=np.float64)
            if dist % 2 == 0:  # nodes
                node_weight_list = np.concatenate(
                    (node_weight_list, weight_array_at_d))
                node_average = np.mean(node_weight_list)
            else:  # edges
                edge_weight_list = np.concatenate(
                    (edge_weight_list, weight_array_at_d))
                edge_average = stats.gmean(edge_weight_list)
            weight = node_average * edge_average
            neighborhood_graph_weight_list.append(weight)
        graph.nodes[root]['neigh_graph_weight'] = \
            neighborhood_graph_weight_list 
Author: fabriziocosta, Project: EDeN, Lines: 34, Source: graph.py
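
The running-average pattern here appends each distance ring's weights to a growing array, then recomputes the arithmetic mean (nodes) or the geometric mean (edges, via scipy.stats.gmean). The node branch in isolation:

import numpy as np

node_weight_list = np.array([1.0])
for weight_array_at_d in [np.array([2.0, 3.0]), np.array([4.0])]:
    node_weight_list = np.concatenate((node_weight_list, weight_array_at_d))
    node_average = np.mean(node_weight_list)
print(node_weight_list, node_average)   # [1. 2. 3. 4.] 2.5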


Note: The numpy.concatenate examples in this article were compiled by 纯净天空 (vimsky) from open-source code and documentation platforms such as GitHub and MSDocs. The snippets come from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce without permission.