

Python dill.load Method Code Examples

This article collects typical usage examples of the dill.load method in Python. If you are wondering how exactly to use dill.load, how to call it, or what working examples look like, the curated method code examples here may help. You can also explore further usage examples of the dill module that this method belongs to.


The following presents 15 code examples of the dill.load method, sorted by popularity by default.
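
Before the project examples below, here is a minimal, self-contained sketch of the basic dill.dump / dill.load round trip (the file name 'square.dill' and the lambda are illustrative assumptions, not taken from any of the projects). Unlike the standard pickle module, dill can also serialize objects such as lambdas and closures:

import dill

square = lambda x: x * x  # the standard pickle module cannot serialize a lambda

# Serialize the function to a file ...
with open('square.dill', 'wb') as f:
    dill.dump(square, f)

# ... and load it back with dill.load.
with open('square.dill', 'rb') as f:
    restored = dill.load(f)

print(restored(4))  # prints 16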

Example 1: load

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def load(path, num_cpu=16):
    """Load act function that was returned by learn function.

    Parameters
    ----------
    path: str
        path to the act function pickle
    num_cpu: int
        number of cpus to use for executing the policy

    Returns
    -------
    act: ActWrapper
        function that takes a batch of observations
        and returns actions.
    """
    return ActWrapper.load(path, num_cpu=num_cpu) 
Developer: AdamStelmaszczyk, Project: learning2run, Lines of code: 19, Source: simple.py

Example 2: __init__

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def __init__(self):
        self.executables = OrderedDict()
        self.loaded = False # TODO: Think about it. Do we need load?
        self.branches = 1
        self.trials = 2
        self.workers = 1
        self.bar = False
        self.name = 'research'
        self.worker_class = PipelineWorker
        self.devices = None
        self.domain = None
        self.n_iters = None
        self.timeout = 5
        self.n_configs = None
        self.n_reps = None
        self.repeat_each = None
        self.logger = FileLogger()

        # update parameters for config. None or dict with keys (function, params, cache)
        self._update_config = None
        # update parameters for domain. None or dict with keys (function, each)
        self._update_domain = None
        self.n_updates = 0 
Developer: analysiscenter, Project: batchflow, Lines of code: 26, Source: research.py

Example 3: __init__

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def __init__(self, *args, **kwargs):
        self.full_config = Config()
        self.session = kwargs.get('session', None)
        self.graph = tf.Graph() if self.session is None else self.session.graph
        self._graph_context = None
        self._train_lock = threading.Lock()

        # Parameters of batch processing: splitting batches into parts and/or using multiple devices to process data
        self.microbatch = None
        self.devices = []
        self.leading_device = None
        self.device_to_scope = {}
        self.scope_to_device = {}
        self.multi_device = False

        # Private storage for often used tensors
        self._attrs = dict()

        # Save/load things
        self._saver = None
        self.preserve = ['_attrs', 'microbatch',
                         'devices', 'leading_device', 'device_to_scope', 'scope_to_device', 'multi_device']

        super().__init__(*args, **kwargs) 
Developer: analysiscenter, Project: batchflow, Lines of code: 26, Source: base.py

Example 4: prepare_dataloaders

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def prepare_dataloaders(opt, device):
    batch_size = opt.batch_size
    data = pickle.load(open(opt.data_pkl, 'rb'))

    opt.max_token_seq_len = data['settings'].max_len
    opt.src_pad_idx = data['vocab']['src'].vocab.stoi[Constants.PAD_WORD]
    opt.trg_pad_idx = data['vocab']['trg'].vocab.stoi[Constants.PAD_WORD]

    opt.src_vocab_size = len(data['vocab']['src'].vocab)
    opt.trg_vocab_size = len(data['vocab']['trg'].vocab)

    #========= Preparing Model =========#
    if opt.embs_share_weight:
        assert data['vocab']['src'].vocab.stoi == data['vocab']['trg'].vocab.stoi, \
            'To share word embeddings, the src/trg word2idx tables must be the same.'

    fields = {'src': data['vocab']['src'], 'trg':data['vocab']['trg']}

    train = Dataset(examples=data['train'], fields=fields)
    val = Dataset(examples=data['valid'], fields=fields)

    train_iterator = BucketIterator(train, batch_size=batch_size, device=device, train=True)
    val_iterator = BucketIterator(val, batch_size=batch_size, device=device)

    return train_iterator, val_iterator 
Developer: jadore801120, Project: attention-is-all-you-need-pytorch, Lines of code: 27, Source: train.py

Example 5: test_create

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def test_create(self, mock):

        value = 1
        function_name = 'test_function'

        @Lambda(name=function_name, bucket='test', key='test', client=self.client)
        def foo():
            return value

        package = DeploymentPackage(foo)

        zfp = zipfile.ZipFile(StringIO(package.zip_bytes(foo.dumped_code)), "r")
        func = dill.load(zfp.open('.lambda.dump'))
        self.assertEqual(func(), value)

        resp_create = foo.create()
        self.assertEqual(resp_create['FunctionName'], function_name)

        # moto doesn't support ZipFile-only lambda deployments, while
        # aws doesn't allow other arguments when specifying the ZipFile argument
        #resp_get = foo.get()
        #self.assertEqual(resp_get['Configuration']['FunctionName'], function_name) 
Developer: ZhukovAlexander, Project: lambdify, Lines of code: 24, Source: test_lambda.py

Example 6: load_embedding_matrix

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def load_embedding_matrix(
        self,
        embedding_matrix: np.ndarray,
        name: str = 'embedding'
    ):
        """
        Load an embedding matrix.

        Load an embedding matrix into the model's embedding layer. The name
        of the embedding layer is specified by `name`. For models with only
        one embedding layer, set `name='embedding'` when creating the keras
        layer, and use the default `name` when loading the matrix. For models
        with more than one embedding layers, initialize keras layer with
        different layer names, and set `name` accordingly to load a matrix
        to a chosen layer.

        :param embedding_matrix: Embedding matrix to be loaded.
        :param name: Name of the layer. (default: 'embedding')
        """
        self.get_embedding_layer(name).set_weights([embedding_matrix]) 
Developer: NTMC-Community, Project: MatchZoo, Lines of code: 22, Source: base_model.py
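
The docstring above boils down to: look up a Keras embedding layer by its name and overwrite its weights. A plain-Keras sketch of that idea follows (the standalone layer and the random matrix are assumptions for illustration, not MatchZoo code):

import numpy as np
import tensorflow as tf

# Create and build an embedding layer with an explicit name.
layer = tf.keras.layers.Embedding(input_dim=100, output_dim=8, name='embedding')
layer.build(input_shape=(None, 4))

# A hypothetical pretrained matrix with a matching shape, loaded via set_weights,
# which is what load_embedding_matrix does for the layer it finds by name.
pretrained = np.random.rand(100, 8)
layer.set_weights([pretrained])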

Example 7: _register_dill

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def _register_dill(self):
        def encode(obj, dumper=dill_dumps):
            return dumper(obj, protocol=pickle_protocol)

        def decode(s):
            return pickle_loads(str_to_bytes(s), load=dill_load)

        registry.register(
            name='dill',
            encoder=encode,
            decoder=decode,
            content_type='application/x-python-serialize',
            content_encoding='binary'
        )

    # the same as upstream, but we need to copy it here so we can access it 
Developer: briancappello, Project: flask-unchained, Lines of code: 18, Source: celery.py
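
Example 7 wires dill into Celery's serialization machinery through kombu's registry, so task payloads are encoded and decoded with dill instead of plain pickle. A minimal standalone sketch of the same registration (assuming kombu and dill are installed; the protocol handling from the original snippet is omitted):

import dill
from kombu.serialization import register

# Register dill as a named serializer that Celery can select via its
# task_serializer / result_serializer / accept_content settings.
register(
    'dill',
    encoder=dill.dumps,
    decoder=dill.loads,
    content_type='application/x-python-serialize',
    content_encoding='binary',
)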

Example 8: load

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def load(path, act_params, num_cpu=16):
  """Load act function that was returned by learn function.

  Parameters
  ----------
  path: str
      path to the act function pickle
  num_cpu: int
      number of cpus to use for executing the policy

  Returns
  -------
  act: ActWrapper
      function that takes a batch of observations
      and returns actions.
  """
  return ActWrapper.load(path, num_cpu=num_cpu, act_params=act_params) 
Developer: llSourcell, Project: A-Guide-to-DeepMinds-StarCraft-AI-Environment, Lines of code: 19, Source: dqfd.py

Example 9: _runAllDeferredFunctions

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def _runAllDeferredFunctions(self, fileObj):
        """
        Read and run deferred functions until EOF from the given open file.
        """

        try:
            while True:
                # Load each function
                deferredFunction = dill.load(fileObj)
                logger.debug("Loaded deferred function %s" % repr(deferredFunction))
                # Run it
                self._runDeferredFunction(deferredFunction)
        except EOFError:
            # This is expected and means we read all the complete entries.
            logger.debug("Out of deferred functions!")
Developer: DataBiosphere, Project: toil, Lines of code: 18, Source: deferred.py
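
The pattern in Example 9 works because dill, like pickle, appends independent frames to the stream, so dill.load can be called repeatedly on the same open file until EOFError signals the end. A minimal standalone sketch of that idea (the file name and payloads are illustrative assumptions):

import dill

# Append several independently dumped objects to one file.
with open('deferred.dill', 'wb') as f:
    for item in ({'task': 1}, {'task': 2}, {'task': 3}):
        dill.dump(item, f)

# Read them back one at a time until the stream is exhausted.
with open('deferred.dill', 'rb') as f:
    try:
        while True:
            print(dill.load(f))
    except EOFError:
        pass  # all complete entries have been read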

Example 10: read_model

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def read_model(file_name):
    print('Reading model from file: %s' % file_name)
    model = dill.load(open(file_name, 'rb'))
    return model 
Developer: Andres-Hernandez, Project: CalibrationNN, Lines of code: 6, Source: neural_network.py

Example 11: create_fields

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def create_fields(opt):
    
    spacy_langs = ['en', 'fr', 'de', 'es', 'pt', 'it', 'nl']
    if opt.src_lang not in spacy_langs:
        print('invalid src language: ' + opt.src_lang + '; supported languages: ' + str(spacy_langs))
    if opt.trg_lang not in spacy_langs:
        print('invalid trg language: ' + opt.trg_lang + '; supported languages: ' + str(spacy_langs))
    
    print("loading spacy tokenizers...")
    
    t_src = tokenize(opt.src_lang)
    t_trg = tokenize(opt.trg_lang)

    TRG = data.Field(lower=True, tokenize=t_trg.tokenizer, init_token='<sos>', eos_token='<eos>')
    SRC = data.Field(lower=True, tokenize=t_src.tokenizer)

    if opt.load_weights is not None:
        try:
            print("loading presaved fields...")
            SRC = pickle.load(open(f'{opt.load_weights}/SRC.pkl', 'rb'))
            TRG = pickle.load(open(f'{opt.load_weights}/TRG.pkl', 'rb'))
        except:
            print("error opening SRC.pkl and TXT.pkl field files, please ensure they are in " + opt.load_weights + "/")
            quit()
        
    return(SRC, TRG) 
Developer: SamLynnEvans, Project: Transformer, Lines of code: 28, Source: Process.py

Example 12: create_dataset

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def create_dataset(opt, SRC, TRG):

    print("creating dataset and iterator... ")

    raw_data = {'src' : [line for line in opt.src_data], 'trg': [line for line in opt.trg_data]}
    df = pd.DataFrame(raw_data, columns=["src", "trg"])
    
    mask = (df['src'].str.count(' ') < opt.max_strlen) & (df['trg'].str.count(' ') < opt.max_strlen)
    df = df.loc[mask]

    df.to_csv("translate_transformer_temp.csv", index=False)
    
    data_fields = [('src', SRC), ('trg', TRG)]
    train = data.TabularDataset('./translate_transformer_temp.csv', format='csv', fields=data_fields)

    train_iter = MyIterator(train, batch_size=opt.batchsize, device=opt.device,
                        repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                        batch_size_fn=batch_size_fn, train=True, shuffle=True)
    
    os.remove('translate_transformer_temp.csv')

    if opt.load_weights is None:
        SRC.build_vocab(train)
        TRG.build_vocab(train)
        if opt.checkpoint > 0:
            try:
                os.mkdir("weights")
            except:
                print("weights folder already exists, run program with -load_weights weights to load them")
                quit()
            pickle.dump(SRC, open('weights/SRC.pkl', 'wb'))
            pickle.dump(TRG, open('weights/TRG.pkl', 'wb'))

    opt.src_pad = SRC.vocab.stoi['<pad>']
    opt.trg_pad = TRG.vocab.stoi['<pad>']

    opt.train_len = get_len(train_iter)

    return train_iter 
Developer: SamLynnEvans, Project: Transformer, Lines of code: 41, Source: Process.py

Example 13: undillify

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def undillify(url, str_version = False):
    '''Reads back in a serialized object matching the filename of the given url'''
    
    fn = os.path.join('webpage_cache', strip_url(url) + '.dill')
    string_version = dill.load(open(fn, 'rb'))
    
    if str_version:
        return string_version
    else:
        return BeautifulSoup(string_version) 
Developer: zbeaver4, Project: python-webpage-monitor-slackbot, Lines of code: 12, Source: monitorbot.py

Example 14: load

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def load(filename):
  """ Wrapper to load an object from a file."""
  with tf.gfile.Open(filename, 'rb') as f:
    return pickle.load(f) 
Developer: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 6, Source: preprocess.py

Example 15: load

# Required module: import dill [as alias]
# Or: from dill import load [as alias]
def load(filename):
  with tf.gfile.Open(filename, 'rb') as f:
    return pickle.load(f) 
Developer: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 5, Source: task.py


Note: The dill.load method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; the copyright of the source code belongs to its original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.