

Python data.append Method Code Examples

This article collects typical usage examples of the data.append method from Python code built around torch.utils.data. If you are wondering what data.append does in this context, how to call it, or what it looks like in practice, the curated examples below may help. Strictly speaking, torch.utils.data defines no append method: in every example, append is the built-in list.append, called on Python lists (often named data) that are then wrapped in a torch.utils.data.Dataset or fed to a DataLoader. You can also explore further usage examples of the containing module, torch.utils.data.


Fifteen code examples of data.append are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
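All fifteen snippets follow the same basic pattern: accumulate samples in plain Python lists via list.append, then hand the lists to a torch.utils.data.Dataset and DataLoader. Here is a minimal, self-contained sketch of that pattern; the class and variable names are illustrative, not taken from the projects below.

import torch
from torch.utils.data import Dataset, DataLoader

class ListDataset(Dataset):
    """Wraps parallel Python lists that were filled with list.append."""
    def __init__(self, xs, ys):
        self.xs, self.ys = xs, ys

    def __len__(self):
        return len(self.xs)

    def __getitem__(self, idx):
        return torch.tensor(self.xs[idx]), torch.tensor(self.ys[idx])

xs, ys = [], []
for x, y in [([1, 2], 0), ([3, 4], 1)]:
    xs.append(x)  # the "data.append" these examples revolve around
    ys.append(y)

loader = DataLoader(ListDataset(xs, ys), batch_size=2, shuffle=True)
for batch_x, batch_y in loader:
    print(batch_x.shape, batch_y.shape)  # torch.Size([2, 2]) torch.Size([2])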

Example 1: get_seq

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def get_seq(pairs, lang, batch_size, type, max_len):
    x_seq = []
    y_seq = []
    ptr_seq = []
    for pair in pairs:
        x_seq.append(pair[0])
        y_seq.append(pair[1])
        ptr_seq.append(pair[2])
        if type:  # `type` doubles as a train flag and shuffle flag; it shadows the builtin
            lang.index_words(pair[0])
            lang.index_words(pair[1])

    dataset = Dataset(x_seq, y_seq, ptr_seq, lang.word2index, lang.word2index, max_len)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=type,
                                              collate_fn=collate_fn)
    return data_loader
Developer: ConvLab, Project: ConvLab, Lines: 20, Source: utils_NMT.py
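A hypothetical call, assuming the read_langs helper from Example 14 below and a Lang vocabulary class from the same ConvLab module (both assumptions, not shown here):

pairs, max_len, max_r_len = read_langs('data/eng-fra.txt')  # illustrative path
lang = Lang()  # assumed vocabulary helper from the same module
train_loader = get_seq(pairs, lang, batch_size=32, type=True, max_len=max_len)
for batch in train_loader:  # batch layout is decided by the module's collate_fn
    break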

Example 2: preprocess

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def preprocess(self, sequence, word2id, trg=True):
        """Converts words to ids."""
        if trg:
            # target side: a flat sentence string -> id list, UNK for OOV, EOS appended
            story = [word2id[word] if word in word2id else UNK_token for word in sequence.split(' ')] + [EOS_token]
        else:
            # source side: a list of (word, speaker, time) memory triples -> nested id lists
            story = []
            for i, word_triple in enumerate(sequence):
                story.append([])
                for ii, word in enumerate(word_triple):
                    temp = word2id[word] if word in word2id else UNK_token
                    story[i].append(temp)
        try:
            story = torch.Tensor(story)
        except Exception:  # ragged nesting cannot be tensorized; dump it for debugging
            print(sequence)
            print(story)
        return story
Developer: ConvLab, Project: ConvLab, Lines: 19, Source: utils_woz_mem2seq.py
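A sketch of the target-side branch, treating preprocess as a free function in a single script. UNK_token and EOS_token are module-level constants in the original file; the ids below are assumed values.

UNK_token, EOS_token = 0, 2  # assumed ids
word2id = {'hello': 4, 'world': 5}
ids = preprocess(None, 'hello world foo', word2id, trg=True)  # None stands in for self
print(ids)  # tensor([4., 5., 0., 2.])  ('foo' is OOV and maps to UNK_token)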

Example 3: get_seq

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def get_seq(pairs, lang, batch_size, type, max_len):
    x_seq = []
    y_seq = []
    ptr_seq = []
    gate_seq = []
    for pair in pairs:
        x_seq.append(pair[0])
        y_seq.append(pair[1])
        ptr_seq.append(pair[2])
        gate_seq.append(pair[3])
        if type:  # training mode: only the input side extends the vocabulary here
            lang.index_words(pair[0])

    dataset = Dataset(x_seq, y_seq, ptr_seq, gate_seq, lang.word2index, lang.word2index, max_len)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=type,
                                              collate_fn=collate_fn)
    return data_loader
Developer: ConvLab, Project: ConvLab, Lines: 21, Source: until_temp.py

Example 4: load_candidates

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def load_candidates(task_id, candidates_f):
    # containers
    # type_dict = get_type_dict(KB_DIR, dstc2=(task_id == 6))
    candidates, candid2idx, idx2candid = [], {}, {}
    # update data source file based on task id
    candidates_f = DATA_SOURCE_TASK6 if task_id == 6 else candidates_f
    # read from file, one candidate per line: "<id> <answer text>"
    with open(candidates_f) as f:
        for i, line in enumerate(f):
            line = line.strip()
            temp = line.split(' ')          # all tokens, including the leading id
            answer = line.split(' ', 1)[1]  # everything after the id
            candid2idx[answer] = i
            candidates.append(temp[1:])     # token list without the id
            idx2candid[i] = answer
    return candidates, candid2idx, idx2candid
Developer: ConvLab, Project: ConvLab, Lines: 18, Source: until_temp.py

Example 5: get_seq

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def get_seq(pairs, lang, batch_size, type, max_len):
    x_seq = []
    y_seq = []
    ptr_seq = []
    gate_seq = []
    for pair in pairs:
        x_seq.append(pair[0])
        y_seq.append(pair[1])
        ptr_seq.append(pair[2])
        gate_seq.append(pair[3])
        if type:  # training mode: index both sides of the pair
            lang.index_words(pair[0])
            lang.index_words(pair[1])

    dataset = Dataset(x_seq, y_seq, ptr_seq, gate_seq, lang.word2index, lang.word2index, max_len)
    data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                              batch_size=batch_size,
                                              shuffle=type,
                                              collate_fn=collate_fn)
    return data_loader
Developer: ConvLab, Project: ConvLab, Lines: 22, Source: utils_babi.py

Example 6: __getitem__

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def __getitem__(self, idx):
        if isinstance(idx, slice):
            # slice: gather one [value-per-key] row for each index in the slice
            data = []
            for i in range(
                idx.start, idx.stop, idx.step if idx.step is not None else 1
            ):
                temp_data = []
                for key in self.order:
                    temp_data.append(self.dict_object[key][i])
                data.append(temp_data)
        else:
            # single index: one value per key, in self.order
            data = []
            for key in self.order:
                data.append(self.dict_object[key][idx])

        return data
Developer: huggingface, Project: neuralcoref, Lines: 19, Source: dataset.py
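Illustrative usage, assuming ds is an instance of the class above whose dict_object holds two aligned columns (contents made up for the sketch):

# ds.order       = ['tokens', 'label']
# ds.dict_object = {'tokens': [[1], [2], [3]], 'label': [0, 1, 0]}
ds[1]     # -> [[2], 1]              one value per key, in self.order
ds[0:2]   # -> [[[1], 0], [[2], 1]]  one row per index in the slice

Note that an open-ended slice such as ds[:2] would reach range() with start=None and raise a TypeError, so callers must pass explicit slice bounds.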

Example 7: update_coreset

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def update_coreset(self, coreset_size, seen):
        num_data_per = coreset_size // len(seen)
        remainder = coreset_size % len(seen)
        data = []
        targets = []

        # random coreset management; latest classes take memory remainder
        # coreset selection without affecting RNG state
        state = np.random.get_state()
        np.random.seed(self.seed*10000+self.t)
        for k in reversed(seen):
            locs = (self.targets == k).nonzero()[0]
            if (remainder > 0) and (len(locs) > num_data_per):
                num_data_k = num_data_per + 1
                remainder -= 1
            else:
                num_data_k = min(len(locs), num_data_per)
            locs_chosen = locs[np.random.choice(len(locs), num_data_k, replace=False)]
            data.append(self.data[locs_chosen])
            targets.append(self.targets[locs_chosen])
        self.coreset = (np.concatenate(list(reversed(data)), axis=0), np.concatenate(list(reversed(targets)), axis=0))
        np.random.set_state(state) 
Developer: kibok90, Project: iccv2019-inc, Lines: 24, Source: inc_ext.py
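The get_state/set_state bracket is what makes the coreset sampling reproducible without disturbing the caller's random stream; a minimal illustration of that pattern:

import numpy as np

state = np.random.get_state()           # remember the global RNG
np.random.seed(1234)                    # deterministic picks inside the bracket
picks = np.random.choice(10, 3, replace=False)
np.random.set_state(state)              # caller's random stream continues untouched
print(picks)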

Example 8: prepare

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def prepare(self, dim, sd):
        """
        Make torch Tensors from g2-`dim`-`sd` and infer labels.

        Args:
            dim: dimensionality of the g2 set.
            sd: standard deviation of the Gaussian clusters.

        Returns:
            (data, target) Tensors; the first 1024 points are labeled 0, the rest 1.
        """
        filename = 'g2-{}-{}.txt'.format(dim, sd)
        data = []
        target = []
        with open(os.path.join(self.root, filename)) as in_f:
            for i, line in enumerate(in_f):
                # each line holds `dim` integer coordinates; cluster id follows row order
                a, b = list(map(int, line.split())), 0 if i < 1024 else 1
                data.append(a)
                target.append(b)
        data = torch.Tensor(data)
        target = torch.Tensor(target)

        if self.stardardize:  # (sic: the attribute name is misspelled in the source class)
            data = (data - 550) / 50

        return data, target
Developer: rdevon, Project: cortex, Lines: 27, Source: toysets.py
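The (data - 550) / 50 step matches the published g2 benchmark layout, where the two clusters are centered at 500 and 600 in every dimension (an assumption about the dataset, not stated in the code). Applying the transform to those centers shows it roughly zero-centers and unit-scales the data:

import torch

centers = torch.Tensor([[500.0], [600.0]])  # assumed g2 cluster means, one dimension
print((centers - 550) / 50)                 # tensor([[-1.], [ 1.]])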

Example 9: read_object_labels_csv

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def read_object_labels_csv(file, header=True):
    images = []
    num_categories = 0
    print('[dataset] read', file)
    with open(file, 'r') as f:
        reader = csv.reader(f)
        rownum = 0
        for row in reader:
            if header and rownum == 0:
                header = row  # keep the header row; every later row is data
            else:
                if num_categories == 0:
                    num_categories = len(row) - 1  # every column after the name is a label
                name = row[0]
                labels = np.asarray(row[1:num_categories + 1]).astype(np.float32)
                labels = torch.from_numpy(labels)
                item = (name, labels)
                images.append(item)
            rownum += 1
    return images
Developer: alexandonian, Project: pretorched-x, Lines: 22, Source: voc.py
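Illustrative usage, assuming the source module's imports (csv, numpy, torch); the file name and category columns below are made up to show the expected layout, one name column followed by one numeric column per category:

with open('voc_labels.csv', 'w') as f:
    f.write('name,aeroplane,bicycle,bird\n')
    f.write('img001,1.0,0.0,1.0\n')

images = read_object_labels_csv('voc_labels.csv')
name, labels = images[0]
print(name, labels)  # img001 tensor([1., 0., 1.])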

Example 10: main

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def main():
    """ Convert dataset to sequences.
    """
    df = load_train_df()
    data = []
    for item in tqdm.tqdm(df.itertuples(), total=len(df)):
        if not item.labels:
            continue
        # labels come as a flat space-separated string of 5-field records: class x y w h
        labels = np.array(item.labels.split(' ')).reshape(-1, 5)
        sequences = get_sequences(labels[:, 1:].astype(float))
        for seq in sequences:
            data.append({
                'image_id': item.image_id,
                'text': ' '.join(labels[i, 0] for i in seq),
            })
    pd.DataFrame(data).to_csv(TRAIN_TEXTS_PATH, index=None)
Developer: lopuhin, Project: kaggle-kuzushiji-2019, Lines: 18, Source: dataset.py

Example 11: getdatamask

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def getdatamask(data, mask_data, debug=False):  # read data and mask, reshape
    datas = []
    for fnm, masks in tqdm(zip(data, mask_data)):
        item = {}
        img = np.load(fnm)  # axes are z, y, x
        nz, ny, nx = img.shape
        # round each axis up to a multiple of 8 so three 2x poolings divide evenly
        tnz, tny, tnx = math.ceil(nz/8.)*8., math.ceil(ny/8.)*8., math.ceil(nx/8.)*8.
        img = imfit(img, int(tnz), int(tny), int(tnx))  # alternative: zoom(img, (tnz/nz, tny/ny, tnx/nx), order=2, mode='nearest')
        item['img'] = t.from_numpy(img)
        item['mask'] = []
        for idx, maskfnm in enumerate(masks):
            if maskfnm is None:
                ms = np.zeros((nz, ny, nx), np.uint8)  # structure absent: empty mask
            else:
                ms = np.load(maskfnm).astype(np.uint8)
                assert ms.min() == 0 and ms.max() == 1
            mask = imfit(ms, int(tnz), int(tny), int(tnx))  # alternative: zoom(ms, ..., order=0, mode='constant')
            item['mask'].append(mask)
        assert len(item['mask']) == 9  # one mask per anatomical structure
        item['name'] = str(fnm)
        datas.append(item)
    return datas
Developer: wentaozhu, Project: AnatomyNet-for-anatomical-segmentation, Lines: 24, Source: baseline3Pool.py
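The ceil-to-multiple rounding is what ties these three near-identical loaders to their networks: 8 = 2^3 for the 3-pool baseline above, 16 = 2^4 for the 4-pool variant in Example 13. A two-line check of the arithmetic:

import math

pad_to = lambda n, m: int(math.ceil(n / float(m)) * m)
print(pad_to(50, 8), pad_to(50, 16))  # 56 64: divisible by 2 three (resp. four) times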

Example 12: getdatamask

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def getdatamask(data, mask_data, debug=False):  # read data and mask, reshape
    datas = []
    for fnm, masks in tqdm(zip(data, mask_data)):
        item = {}
        img = np.load(fnm)  # axes are z, y, x
        nz, ny, nx = img.shape
        # if nz > 300 or ny > 300 or nx > 300:
        #     print(fnm, nx, ny, nz)
        #     assert 1 == 0
        # round each axis up to a multiple of 8 so three 2x poolings divide evenly
        tnz, tny, tnx = math.ceil(nz/8.)*8., math.ceil(ny/8.)*8., math.ceil(nx/8.)*8.
        img = imfit(img, int(tnz), int(tny), int(tnx))  # alternative: zoom(img, (tnz/nz, tny/ny, tnx/nx), order=2, mode='nearest')
        item['img'] = t.from_numpy(img)
        item['mask'] = []
        for idx, maskfnm in enumerate(masks):
            if maskfnm is None:
                ms = np.zeros((nz, ny, nx), np.uint8)  # structure absent: empty mask
            else:
                ms = np.load(maskfnm).astype(np.uint8)
                assert ms.min() == 0 and ms.max() == 1
            mask = imfit(ms, int(tnz), int(tny), int(tnx))  # alternative: zoom(ms, ..., order=0, mode='constant')
            item['mask'].append(mask)
        assert len(item['mask']) == 9  # one mask per anatomical structure
        item['name'] = str(fnm)
        datas.append(item)
    return datas
Developer: wentaozhu, Project: AnatomyNet-for-anatomical-segmentation, Lines: 27, Source: baselineDiceFocalLoss.py

Example 13: getdatamask

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def getdatamask(data, mask_data, debug=False):  # read data and mask, reshape
    datas = []
    for fnm, masks in tqdm(zip(data, mask_data)):
        item = {}
        img = np.load(fnm)  # axes are z, y, x
        nz, ny, nx = img.shape
        # round each axis up to a multiple of 16 so four 2x poolings divide evenly
        tnz, tny, tnx = math.ceil(nz/16.)*16., math.ceil(ny/16.)*16., math.ceil(nx/16.)*16.
        img = imfit(img, int(tnz), int(tny), int(tnx))  # alternative: zoom(img, (tnz/nz, tny/ny, tnx/nx), order=2, mode='nearest')
        item['img'] = t.from_numpy(img)
        item['mask'] = []
        for idx, maskfnm in enumerate(masks):
            if maskfnm is None:
                ms = np.zeros((nz, ny, nx), np.uint8)  # structure absent: empty mask
            else:
                ms = np.load(maskfnm).astype(np.uint8)
                assert ms.min() == 0 and ms.max() == 1
            mask = imfit(ms, int(tnz), int(tny), int(tnx))  # alternative: zoom(ms, ..., order=0, mode='constant')
            item['mask'].append(mask)
        assert len(item['mask']) == 9  # one mask per anatomical structure
        item['name'] = str(fnm)
        datas.append(item)
    return datas
Developer: wentaozhu, Project: AnatomyNet-for-anatomical-segmentation, Lines: 24, Source: baseline4Pool.py

Example 14: read_langs

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def read_langs(file_name, max_line=None):
    logging.info("Reading lines from {}".format(file_name))
    data = []

    with open(file_name) as fin:
        cnt_ptr = 0
        cnt_voc = 0
        max_r_len = 0
        for line in fin:
            line = line.strip()
            if line:
                eng, fre = line.split('\t')
                eng, fre = word_tokenize(eng.lower()), word_tokenize(fre.lower())
                ptr_index = []
                for key in fre:
                    # note: val[0] == key compares the first character of each English
                    # token with the whole French token, as in the original source
                    index = [loc for loc, val in enumerate(eng) if (val[0] == key)]
                    if index:
                        index = max(index)  # point to the last matching source position
                        cnt_ptr += 1
                    else:
                        index = len(eng)  # sentinel: generate from the vocabulary instead
                        cnt_voc += 1
                    ptr_index.append(index)

                if len(ptr_index) > max_r_len:
                    max_r_len = len(ptr_index)
                eng = eng + ['$$$$']  # sentinel token appended to the source
                # print(eng, fre, ptr_index)
                data.append([eng, fre, ptr_index])

    max_len = max([len(d[0]) for d in data])
    logging.info("Pointer percentage = {}".format(cnt_ptr / (cnt_ptr + cnt_voc)))
    logging.info("Max response len: {}".format(max_r_len))
    logging.info("Max input len: {}".format(max_len))
    logging.info('Sample: Eng = {}, Fre = {}, Ptr = {}'.format(" ".join(data[0][0]), " ".join(data[0][1]), data[0][2]))
    return data, max_len, max_r_len
Developer: ConvLab, Project: ConvLab, Lines: 39, Source: utils_NMT.py

Example 15: generate_memory

# Required import: from torch.utils import data [as alias]
# Or: from torch.utils.data import append [as alias]
def generate_memory(sent, speaker, time):
    sent_new = []
    sent_token = sent.split(' ')
    if speaker == "$u" or speaker == "$s":
        # dialogue turn: one (word, speaker, time) triple per token, padded to MEM_TOKEN_SIZE
        for word in sent_token:
            temp = [word, speaker, 't'+str(time)] + ["PAD"]*(MEM_TOKEN_SIZE-3)
            sent_new.append(temp)
    else:
        # KB entry: the reversed token sequence becomes a single padded memory cell
        sent_token = sent_token[::-1] + ["PAD"]*(MEM_TOKEN_SIZE-len(sent_token))
        sent_new.append(sent_token)
    return sent_new
Developer: ConvLab, Project: ConvLab, Lines: 13, Source: utils_woz_mem2seq.py
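A sketch of both branches. MEM_TOKEN_SIZE is a module-level constant in the original file; 4 is an assumed value.

MEM_TOKEN_SIZE = 4  # assumed; the real value comes from the module's config

print(generate_memory('hello there', '$u', 3))
# [['hello', '$u', 't3', 'PAD'], ['there', '$u', 't3', 'PAD']]
print(generate_memory('pizza_hut address', 'restaurant', 3))
# [['address', 'pizza_hut', 'PAD', 'PAD']]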


Note: The torch.utils.data.append examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code, and do not reproduce this article without permission.