

Python numpy.load Method Code Examples

This article collects typical usage examples of the Python method numpy.load. If you are wrestling with questions such as: How exactly is numpy.load used? How does it work? What does it look like in real code? then congratulations, the hand-picked examples below may be just what you need. You can also explore further usage examples from numpy, the module this method belongs to.


The sections below present 15 code examples of the numpy.load method, ordered by popularity by default.
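As a quick orientation before the examples, here is a minimal round trip with numpy.save and numpy.load (the file name example.npy is purely illustrative):

import numpy as np

arr = np.arange(12, dtype=np.float32).reshape(3, 4)
np.save('example.npy', arr)         # write a single array to an .npy file
loaded = np.load('example.npy')     # read it back with shape and dtype intact
assert (loaded == arr).all() and loaded.dtype == arr.dtype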

Example 1: _deserialize

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def _deserialize(self, data, type_):

        if self.compress:
            # decompress the data if needed
            data = lz4.frame.decompress(data)

        if type_ == _NUMPY:
            # deserialize numpy arrays
            buf = io.BytesIO(data)
            data = np.load(buf)

        elif type_ == _PICKLE:
            # deserialize other python objects
            data = pickle.loads(data)

        else:
            # otherwise, return the data as-is (bytes)
            pass

        return data 
Author: mme, Project: vergeml, Lines of code: 22, Source file: cache.py
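To see where bytes in this shape come from, here is a sketch of the matching serialization path; _serialize_numpy is a hypothetical helper, not the vergeml original, but np.save writes exactly the .npy stream that np.load reads back in _deserialize:

import io

import lz4.frame
import numpy as np

def _serialize_numpy(arr, compress=True):
    # hypothetical inverse of _deserialize for the _NUMPY case
    buf = io.BytesIO()
    np.save(buf, arr)                      # the byte format np.load consumes above
    data = buf.getvalue()
    if compress:
        data = lz4.frame.compress(data)    # matches lz4.frame.decompress above
    return data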

Example 2: setup

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def setup(self, bottom, top):
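        # note: newer PyYAML releases require an explicit Loader argument below;
        # yaml.safe_load(self.param_str) is the modern equivalent for plain configs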
        layer_params = yaml.load(self.param_str)
        self._layer_params = layer_params
        # default batch_size = 256
        self._batch_size = int(layer_params.get('batch_size', 256))
        self._resize = layer_params.get('resize', -1)
        self._mean_file = layer_params.get('mean_file', None)
        self._source_type = layer_params.get('source_type', 'CSV')
        self._shuffle = layer_params.get('shuffle', False)
        # read image_mean from file and preload all data into memory
        # will read either file or array into self._mean
        self.set_mean()
        self.preload_db()
        self._compressed = self._layer_params.get('compressed', True)
        if not self._compressed:
            self.decompress_data() 
Author: liuxianming, Project: Caffe-Python-Data-Layer, Lines of code: 18, Source file: BasePythonDataLayer.py

Example 3: set_mean

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def set_mean(self):
        if self._mean_file:
            if isinstance(self._mean_file, str):
                # read the image mean from a file
                try:
                    # try to load it as a .npy array first
                    self._mean = np.load(self._mean_file)
                except IOError:
                    # fall back to reading a Caffe BlobProto binary file
                    blob = caffe_pb2.BlobProto()
                    with open(self._mean_file, 'rb') as f:
                        blob.ParseFromString(f.read())
                    self._mean = np.array(caffe.io.blobproto_to_array(blob))[0]
            else:
                # the mean was passed in directly as an array-like value
                self._mean = np.array(self._mean_file)
        else:
            self._mean = None 
Author: liuxianming, Project: Caffe-Python-Data-Layer, Lines of code: 19, Source file: BasePythonDataLayer.py

Example 4: load_encodings

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def load_encodings():
    """
    Load the saved face encodings and the matching name vectors, then return them.
    :return: known_face_encodings, known_face_names
    """
    # build the base files first if they do not exist yet
    if not os.path.exists(KNOWN_FACE_NANE) or not os.path.exists(KNOWN_FACE_ENCODINGS):
        encoding_images(data_path)
    known_face_encodings = np.load(KNOWN_FACE_ENCODINGS)
    known_face_names = np.load(KNOWN_FACE_NANE)
    npy_files = [file for file in os.listdir(data_path)
                 if os.path.isfile(os.path.join(data_path, file)) and file.endswith("npy")]
    for data in npy_files:
        if data.startswith('known_face_encodings_'):
            tmp_face_encodings = np.load(os.path.join(data_path, data))
            known_face_encodings = np.concatenate((known_face_encodings, tmp_face_encodings), axis=0)
            print("load ", data)
        elif data.startswith('known_face_name_'):
            tmp_face_name = np.load(os.path.join(data_path, data))
            known_face_names = np.concatenate((known_face_names, tmp_face_name), axis=0)
            print("load ", data)
        else:
            print('skip to load original ', data)
    return known_face_encodings, known_face_names
Author: matiji66, Project: face-attendance-machine, Lines of code: 25, Source file: encoding_images.py

Example 5: create_cifar100

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def create_cifar100(tfrecord_dir, cifar100_dir):
    print('Loading CIFAR-100 from "%s"' % cifar100_dir)
    import pickle
    with open(os.path.join(cifar100_dir, 'train'), 'rb') as file:
        data = pickle.load(file, encoding='latin1')
    images = data['data'].reshape(-1, 3, 32, 32)
    labels = np.array(data['fine_labels'])
    assert images.shape == (50000, 3, 32, 32) and images.dtype == np.uint8
    assert labels.shape == (50000,) and labels.dtype == np.int32
    assert np.min(images) == 0 and np.max(images) == 255
    assert np.min(labels) == 0 and np.max(labels) == 99
    onehot = np.zeros((labels.size, np.max(labels) + 1), dtype=np.float32)
    onehot[np.arange(labels.size), labels] = 1.0

    with TFRecordExporter(tfrecord_dir, images.shape[0]) as tfr:
        order = tfr.choose_shuffled_order()
        for idx in range(order.size):
            tfr.add_image(images[order[idx]])
        tfr.add_labels(onehot[order])

#---------------------------------------------------------------------------- 
Author: zalandoresearch, Project: disentangling_conditional_gans, Lines of code: 23, Source file: dataset_tool.py

Example 6: deserialize_ndarray_npy

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def deserialize_ndarray_npy(d):
    """
    Deserializes a JSONified :obj:`numpy.ndarray` that was created using numpy's
    :obj:`save` function.

    Args:
        d (:obj:`dict`): A dictionary representation of an :obj:`ndarray` object, created
            using :obj:`numpy.save`.

    Returns:
        An :obj:`ndarray` object.
    """
    with io.BytesIO() as f:
        f.write(json.loads(d['npy']).encode('latin-1'))
        f.seek(0)
        return np.load(f) 
Author: gregreen, Project: dustmaps, Lines of code: 18, Source file: json_serializers.py
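Reading this deserializer next to its inverse clarifies the latin-1 trick: bytes 0-255 map one-to-one onto latin-1 code points, so the raw .npy stream survives a pass through JSON text. Below is a sketch of what the matching serializer could look like, inferred from the d['npy'] access above rather than copied from dustmaps:

import io
import json

import numpy as np

def serialize_ndarray_npy(o):
    # sketch: ndarray -> JSON-safe dict; the dict layout is an assumption
    with io.BytesIO() as f:
        np.save(f, o)                             # write the .npy byte stream
        npy_str = f.getvalue().decode('latin-1')  # lossless bytes-to-text mapping
    return {'npy': json.dumps(npy_str)}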

Example 7: __init__

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def __init__(self, data_path, data_split, vocab, cap_suffix='caps'):
        self.vocab = vocab
        loc = data_path + '/'

        # Captions
        self.captions = []
        with open(loc+'%s_%s.txt' % (data_split, cap_suffix), 'rb') as f:
            for line in f:
                tmp = line.strip()
                if isinstance(tmp, bytes):
                    tmp = tmp.decode()
                self.captions.append(tmp)

        # Image features
        self.images = np.load(loc+'%s_ims.npy' % data_split)
        self.length = len(self.captions)
        # rkiros data stores 5 captions per image (so divide by 5); 10-crop features do not
        if self.images.shape[0] != self.length:
            self.im_div = 5
        else:
            self.im_div = 1
        # the development set for coco is large and so validation would be slow
        if data_split == 'dev':
            self.length = 5000 
Author: ExplorerFreda, Project: VSE-C, Lines of code: 26, Source file: data.py

Example 8: load_mnist

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def load_mnist(training_num=50000):
    data_path = os.path.join(os.path.dirname(os.path.realpath('__file__')), 'mnist.npz')
    if not os.path.isfile(data_path):
        from six.moves import urllib
        origin = (
            'https://github.com/sxjscience/mxnet/raw/master/example/bayesian-methods/mnist.npz'
        )
        print('Downloading data from %s to %s' % (origin, data_path))
        ctx = ssl._create_unverified_context()
        with urllib.request.urlopen(origin, context=ctx) as u, open(data_path, 'wb') as f:
            f.write(u.read())
        print('Done!')
    dat = numpy.load(data_path)
    X = (dat['X'][:training_num] / 126.0).astype('float32')
    Y = dat['Y'][:training_num]
    X_test = (dat['X_test'] / 126.0).astype('float32')
    Y_test = dat['Y_test']
    Y = Y.reshape((Y.shape[0],))
    Y_test = Y_test.reshape((Y_test.shape[0],))
    return X, Y, X_test, Y_test 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 22, Source file: data_loader.py
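The mnist.npz archive loaded above is a zip of named arrays, the kind numpy.savez or numpy.savez_compressed produces. A minimal sketch with placeholder shapes and the same keys the function expects:

import numpy as np

X = np.zeros((100, 784), dtype=np.float32)   # illustrative placeholder data
Y = np.zeros((100, 1), dtype=np.int64)
np.savez('mnist.npz', X=X, Y=Y, X_test=X[:10], Y_test=Y[:10])

dat = np.load('mnist.npz')    # returns a lazy, dict-like NpzFile
print(dat.files)              # e.g. ['X', 'Y', 'X_test', 'Y_test']
x = dat['X']                  # each array is read from the archive on access
dat.close()                   # the NpzFile keeps the zip handle open until closed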

Example 9: load_params

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def load_params(dir_path="", epoch=None, name=""):
    prefix = os.path.join(dir_path, name)
    _, param_loading_path, _ = get_saving_path(prefix, epoch)
    while not os.path.isfile(param_loading_path):
        logging.info("in load_param, %s Not Found!" % param_loading_path)
        time.sleep(60)
    save_dict = nd.load(param_loading_path)
    arg_params = {}
    aux_params = {}
    for k, v in save_dict.items():
        tp, name = k.split(':', 1)
        if tp == 'arg':
            arg_params[name] = v
        elif tp == 'aux':
            aux_params[name] = v
    return arg_params, aux_params, param_loading_path 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 18, Source file: utils.py

Example 10: test_consistency

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def test_consistency(dump=False):
    shape = (299, 299)
    _get_model()
    _get_data(shape)
    if dump:
        _dump_images(shape)
        gt = None
    else:
        gt = {n: mx.nd.array(a) for n, a in np.load('data/inception-v3-dump.npz').items()}
    data = np.load('data/test_images_%d_%d.npy'%shape)
    sym, arg_params, aux_params = mx.model.load_checkpoint('model/Inception-7', 1)
    arg_params['data'] = data
    arg_params['softmax_label'] = np.random.randint(low=1, high=1000, size=(data.shape[0],))
    ctx_list = [{'ctx': mx.gpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}},
                {'ctx': mx.cpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}}]
    gt = check_consistency(sym, ctx_list, arg_params=arg_params, aux_params=aux_params,
                           tol=1e-3, grad_req='null', raise_on_err=False, ground_truth=gt)
    if dump:
        np.savez('data/inception-v3-dump.npz', **{n: a.asnumpy() for n, a in gt.items()}) 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 21, Source file: test_forward.py

Example 11: extract_mnist_data

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def extract_mnist_data(filename, num_images, image_size, pixel_depth):
  """
  Extract the images into a 4D tensor [image index, y, x, channels].

  Values are rescaled from [0, 255] down to [-0.5, 0.5].
  """
  if not tf.gfile.Exists(filename+".npy"):
    with gzip.open(filename) as bytestream:
      bytestream.read(16)
      buf = bytestream.read(image_size * image_size * num_images)
      data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
      data = (data - (pixel_depth / 2.0)) / pixel_depth
      data = data.reshape(num_images, image_size, image_size, 1)
      np.save(filename, data)
      return data
  else:
    with tf.gfile.Open(filename+".npy", mode='rb') as file_obj:  # binary mode, as np.load reads raw bytes
      return np.load(file_obj)
Author: ringringyi, Project: DOTA_models, Lines of code: 21, Source file: input.py
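One detail this example leans on: when the target path does not already end in .npy, np.save appends the suffix itself, which is why the code saves to filename but reopens filename + ".npy". A quick sketch:

import numpy as np

data = np.zeros((2, 3), dtype=np.float32)
np.save('train-images', data)             # numpy writes train-images.npy
restored = np.load('train-images.npy')    # reopen under the suffixed name
assert restored.shape == (2, 3)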

Example 12: is_image_file

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def is_image_file(id, dataset, dtype, filename):
    filename_lower = filename.lower()
    if any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS):
        if dtype == 'novel':
            try:
                default_loader(filename)
                return True
            except OSError:
                print('{filename} failed to load'.format(filename=filename))
                with open('taxonomy/{dataset}/corrupted_{dtype}_{id:d}.txt' \
                          .format(dataset=dataset, dtype=dtype, id=id), 'a') as f:
                    f.write(filename + '\n')
                return False
        else:
            return True
    else:
        return False 
Author: kibok90, Project: cvpr2018-hnd, Lines of code: 19, Source file: preparation.py

Example 13: generate_train_batch

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def generate_train_batch(self):

        batch_data, batch_segs, batch_pids, batch_targets = [], [], [], []
        class_targets_list = [v['class_target'] for (k, v) in self._data.items()]

        # sample patients towards an equilibrium of foreground classes at the ROI level
        # (after randomly sampling the ratio "batch_sample_slack")
        batch_ixs = dutils.get_class_balanced_patients(
            class_targets_list, self.batch_size, self.cf.head_classes - 1, slack_factor=self.cf.batch_sample_slack)
        patients = list(self._data.items())

        for b in batch_ixs:

            patient = patients[b][1]
            all_data = np.load(patient['data'], mmap_mode='r')
            data = all_data[0]
            seg = all_data[1].astype('uint8')
            batch_pids.append(patient['pid'])
            batch_targets.append(patient['class_target'])
            batch_data.append(data[np.newaxis])
            batch_segs.append(seg[np.newaxis])

        data = np.array(batch_data)
        seg = np.array(batch_segs).astype(np.uint8)
        class_target = np.array(batch_targets)
        return {'data': data, 'seg': seg, 'pid': batch_pids, 'class_target': class_target} 
Author: MIC-DKFZ, Project: medicaldetectiontoolkit, Lines of code: 27, Source file: data_loader.py
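The mmap_mode='r' argument above is worth a note: np.load then returns a read-only numpy.memmap rather than reading the whole file, so only the slices the loop actually indexes are paged in from disk. A small sketch, with an illustrative file name:

import numpy as np

arr = np.random.rand(100, 64, 64).astype(np.float32)
np.save('patient_0.npy', arr)

mapped = np.load('patient_0.npy', mmap_mode='r')   # read-only numpy.memmap
print(type(mapped))                                # <class 'numpy.memmap'>
first = np.asarray(mapped[0])                      # only this slice is copied into RAM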

Example 14: compute_mfcc

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def compute_mfcc(audio, **kwargs):
    """
    Compute the MFCC for a given audio waveform. This is
    identical to how DeepSpeech does it, but does it all in
    TensorFlow so that we can differentiate through it.
    """

    batch_size, size = audio.get_shape().as_list()
    audio = tf.cast(audio, tf.float32)

    # 1. Pre-emphasizer, a high-pass filter
    audio = tf.concat((audio[:, :1], audio[:, 1:] - 0.97*audio[:, :-1], np.zeros((batch_size,1000),dtype=np.float32)), 1)

    # 2. windowing into frames of 320 samples, overlapping
    windowed = tf.stack([audio[:, i:i+400] for i in range(0,size-320,160)],1)

    # 3. Take the FFT to convert to frequency space
    ffted = tf.spectral.rfft(windowed, [512])
    ffted = 1.0 / 512 * tf.square(tf.abs(ffted))

    # 4. Compute the Mel windowing of the FFT
    energy = tf.reduce_sum(ffted,axis=2)+1e-30
    filters = np.load("filterbanks.npy").T
    feat = tf.matmul(ffted, np.array([filters]*batch_size,dtype=np.float32))+1e-30

    # 5. Take the DCT again, because why not
    feat = tf.log(feat)
    feat = tf.spectral.dct(feat, type=2, norm='ortho')[:,:,:26]

    # 6. Amplify high frequencies for some reason
    _,nframes,ncoeff = feat.get_shape().as_list()
    n = np.arange(ncoeff)
    lift = 1 + (22/2.)*np.sin(np.pi*n/22)
    feat = lift*feat
    width = feat.get_shape().as_list()[1]

    # 7. And now stick the energy next to the features
    feat = tf.concat((tf.reshape(tf.log(energy),(-1,width,1)), feat[:, :, 1:]), axis=2)
    
    return feat 
Author: rtaori, Project: Black-Box-Audio, Lines of code: 42, Source file: tf_logits.py

Example 15: read

# Required module import: import numpy [as alias]
# Or: from numpy import load [as alias]
def read(self, file, path):
        """Read the content index from file.
        """
        pos, = struct.unpack('<Q', file.read(8))
        if pos == 0:
            raise VergeMLError("Invalid cache file: {}".format(path))
        file.seek(pos)
        self.index, self.meta, self.info = pickle.load(file) 
Author: mme, Project: vergeml, Lines of code: 10, Source file: cache.py


Note: the numpy.load method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not repost without permission.