

Python utils.make_dir Method Code Examples

This article compiles typical usage examples of the Python method utils.make_dir. If you are wondering what utils.make_dir does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the utils module in which this method is defined.


The following presents 13 code examples of the utils.make_dir method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
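Before the examples, note that utils here is each project's own helper module rather than part of the standard library, so the exact implementation of make_dir varies by repository. As a point of reference, a minimal sketch of the kind of helper these examples call might look like this (an assumption for illustration, not code taken from any of the projects below):

# Hypothetical sketch of a typical make_dir helper; the actual
# implementations in the projects below may differ.
import os

def make_dir(path):
    """Create directory `path` (including parents) if it does not already exist."""
    if not os.path.exists(path):
        os.makedirs(path)

Defined this way the helper is idempotent, so repeated calls such as make_dir('checkpoints') followed by make_dir('checkpoints/arvix') in Example 9 are safe: creating an existing directory is simply a no-op.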

Example 1: __init__

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def __init__(self, name = None, **kwargs):
        super(JDItemInfoSpider, self).__init__(name, **kwargs)
        self.url = kwargs.get("url")
        self.guid = kwargs.get('guid', 'guid')
        self.product_id = kwargs.get('product_id')
        # self.url = 'https://item.jd.com/11478178241.html'
        # self.url = 'https://item.jd.com/4142680.html'
        # self.url = 'https://item.jd.com/3133859.html'
        # self.url = 'https://item.jd.com/3995645.html'
        # self.product_id = 3995645
        self.log('product_id:%s' % self.product_id)
        self.item_table = 'item_%s' % self.product_id
        self.urls_key = '%s_urls' % self.product_id

        self.log_dir = 'log/%s' % self.product_id
        self.is_record_page = False

        self.sql = kwargs.get('sql')
        self.red = kwargs.get('red')

        if self.is_record_page:
            utils.make_dir(self.log_dir) 
Developer ID: awolfly9, Project: jd_analysis, Lines of code: 24, Source: jd_item_info.py

Example 2: __init__

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def __init__(self, name = None, **kwargs):
        super(JDCommentSpider, self).__init__(name, **kwargs)
        self.url = kwargs.get("url")
        self.guid = kwargs.get('guid', 'guid')
        self.product_id = kwargs.get('product_id')
        # self.url = 'https://item.jd.com/11478178241.html'
        # self.url = 'https://item.jd.com/4142680.html'
        # self.url = 'https://item.jd.com/3133859.html'
        # self.url = 'https://item.jd.com/3995645.html'
        # self.product_id = 3995645
        self.log('product_id:%s' % self.product_id)
        self.item_table = 'item_%s' % self.product_id
        self.urls_key = '%s_urls' % self.product_id

        self.log_dir = 'log/%s' % self.product_id
        self.is_record_page = False

        self.sql = kwargs.get('sql')
        self.red = kwargs.get('red')
        proxymng.red = self.red
        
        if self.is_record_page:
            utils.make_dir(self.log_dir)

        self.init() 
Developer ID: awolfly9, Project: jd_analysis, Lines of code: 27, Source: jd_comment.py

Example 3: load

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def load(self):
        '''
        Loads replay memory (attributes and arrays) into self, if possible.
        '''
        # Create out dir
        utils.make_dir(self.data_dir)
        try:
            print('Loading Memory data into Replay Memory Instance...')
            # Load property list
            d = json.load(open('{}/properties.json'.format(self.data_dir)))
            # Load numpy arrays
            self._load_arrays(self.data_dir, d['saved_size'])

            print('Finished loading Memory data into Replay Memory Instance!')

        except IOError as e:
            self.__init__(self.memory_size,
                          data_dir=self.data_dir, load_existing=False)
            print("I/O error({0}): {1}".format(e.errno, e.strerror))
            print("Couldn't find initial values for Replay Memory, instance init as new.") 
Developer ID: diegoalejogm, Project: deep-q-learning, Lines of code: 22, Source: models.py

Example 4: init

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def init(self):
        self.dir_log = 'log/validator/%s' % self.name
        utils.make_dir(self.dir_log)

        self.sql.init_proxy_table(self.name) 
Developer ID: awolfly9, Project: IPProxyTool, Lines of code: 7, Source: validator.py

Example 5: init

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def init(self):
        self.meta = {
            'download_timeout': self.timeout,
        }

        self.dir_log = 'log/proxy/%s' % self.name
        utils.make_dir(self.dir_log)
        self.sql.init_proxy_table(config.free_ipproxy_table) 
Developer ID: awolfly9, Project: IPProxyTool, Lines of code: 10, Source: basespider.py

Example 6: save

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def save(self, size):
        '''
        Saves replay memory (attributes and arrays).
        '''
        # Create out dir
        utils.make_dir(self.data_dir)
        print('Saving Memory data into Replay Memory Instance...')
        # Save property dict
        with open('{}/properties.json'.format(self.data_dir), 'w') as f:
            json.dump(self.to_dict(size), f)
        # Save arrays
        self._save_arrays(self.data_dir, size) 
Developer ID: diegoalejogm, Project: deep-q-learning, Lines of code: 14, Source: models.py

Example 7: plot_perm_ttest_results

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def plot_perm_ttest_results(events_id, inverse_method='dSPM', plot_type='scatter_plot'):
    print('plot_perm_ttest_results')
    all_data = defaultdict(dict)
    fsave_vertices = [np.arange(10242), np.arange(10242)]
    fs_pts = mne.vertex_to_mni(fsave_vertices, [0, 1], 'fsaverage', LOCAL_SUBJECTS_DIR) # 0 for lh
    for cond_id, cond_name, patient, hc, data in patients_hcs_conds_gen(events_id, True, inverse_method):
        all_data[patient][hc] = data[()]
    print(all_data.keys())
    for patient, pat_data in all_data.iteritems():  # Python 2 idiom; use all_data.items() in Python 3
        print(patient)
        fol = op.join(LOCAL_ROOT_DIR, 'permutation_ttest_results', patient)
        utils.make_dir(fol)
        if op.isfile(op.join(fol, 'perm_ttest_points.npz')):
            d = np.load(op.join(fol, 'perm_ttest_points.npz'))
            if plot_type == 'scatter_plot':
                points, values = d['points'][()], d['values'][()]
            elif plot_type == 'pysurfer':
                vertices, vertives_values = d['vertices'][()], d['vertives_values'][()]
        else:
            points, values, vertices, vertives_values = calc_points(pat_data, fs_pts)
            np.savez(op.join(fol, 'perm_ttest_points'), points=points, values=values, vertices=vertices, vertives_values=vertives_values)
        max_vals = 8 # int(np.percentile([max(v) for v in values.values()], 70))
        print(max_vals)
        fol = op.join(fol, '{}_figures'.format(plot_type))
        utils.make_dir(fol)
        if plot_type == 'scatter_plot':
            scatter_plot_perm_ttest_results(points, values, fs_pts, max_vals, fol)
        elif plot_type == 'pysurfer':
            pysurfer_plot_perm_ttest_results(vertices, vertives_values, max_vals, fol) 
Developer ID: pelednoam, Project: mmvt, Lines of code: 31, Source: meg_statistics.py

Example 8: create_local_folds

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def create_local_folds():
    for fol in ['eop', 'evo', 'inv', 'stc', 'stc_epochs', 'stc_morphed',
                'stc_epochs_morphed', 'stc_clusters', 'clusters_results',
                'permutation_ttest_results', 'results_for_blender']:
        utils.make_dir(op.join(LOCAL_ROOT_DIR, fol)) 
Developer ID: pelednoam, Project: mmvt, Lines of code: 7, Source: meg_statistics.py

Example 9: main

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def main():
    vocab = (
            " $%'()+,-./0123456789:;=?ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            "\\^_abcdefghijklmnopqrstuvwxyz{|}")
    seq = tf.placeholder(tf.int32, [None, None])
    temp = tf.placeholder(tf.float32)
    loss, sample, in_state, out_state = create_model(seq, temp, vocab)
    global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    optimizer = tf.train.AdamOptimizer(LR).minimize(loss, global_step=global_step)
    utils.make_dir('checkpoints')
    utils.make_dir('checkpoints/arvix')
    training(vocab, seq, loss, optimizer, global_step, temp, sample, in_state, out_state) 
Developer ID: chiphuyen, Project: stanford-tensorflow-tutorials, Lines of code: 14, Source: 11_char_rnn_gist.py

Example 10: build_vocab

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def build_vocab(words, vocab_size):
    """ Build vocabulary of VOCAB_SIZE most frequent words """
    dictionary = dict()
    count = [('UNK', -1)]
    count.extend(Counter(words).most_common(vocab_size - 1))
    index = 0
    utils.make_dir('processed')
    with open('processed/vocab_1000.tsv', "w") as f:
        for word, _ in count:
            dictionary[word] = index
            if index < 1000:
                f.write(word + "\n")
            index += 1
    index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return dictionary, index_dictionary 
Developer ID: chiphuyen, Project: stanford-tensorflow-tutorials, Lines of code: 17, Source: process_data.py

Example 11: main

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def main():
    with tf.variable_scope('input') as scope:
        # use variable instead of placeholder because we're training the initial image to make it
        # look like both the content image and the style image
        input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
    
    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    utils.make_dir('checkpoints')
    utils.make_dir('outputs')
    model = vgg_model.load_vgg(VGG_MODEL, input_image)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    
    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    content_image = content_image - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image = style_image - MEAN_PIXELS

    model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model, 
                                                    input_image, content_image, style_image)
    ###############################
    ## TO DO: create optimizer
    ## model['optimizer'] = ...
    ###############################
    model['summary_op'] = _create_summary(model)

    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, input_image, initial_image) 
Developer ID: chiphuyen, Project: stanford-tensorflow-tutorials, Lines of code: 29, Source: style_transfer.py

Example 12: main

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def main():
    with tf.variable_scope('input') as scope:
        # use variable instead of placeholder because we're training the initial image to make it
        # look like both the content image and the style image
        input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
    
    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    utils.make_dir('checkpoints')
    utils.make_dir('outputs')
    model = vgg_model.load_vgg(VGG_MODEL, input_image)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')

    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    content_image = content_image - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image = style_image - MEAN_PIXELS

    model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model, 
                                                    input_image, content_image, style_image)
    ###############################
    ## TO DO: create optimizer
    model['optimizer'] = tf.train.AdamOptimizer(LR).minimize(model['total_loss'], 
                                                            global_step=model['global_step'])
    ###############################
    model['summary_op'] = _create_summary(model)

    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, input_image, initial_image) 
Developer ID: chiphuyen, Project: stanford-tensorflow-tutorials, Lines of code: 30, Source: style_transfer.py

Example 13: train_model

# Required module import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def train_model(model, batch_gen, num_train_steps, weights_fld):
    saver = tf.train.Saver() # defaults to saving all variables - in this case embed_matrix, nce_weight, nce_bias

    initial_step = 0
    utils.make_dir('checkpoints')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
        # if that checkpoint exists, restore from checkpoint
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        total_loss = 0.0 # we use this to calculate the average loss over the last SKIP_STEP steps
        writer = tf.summary.FileWriter('improved_graph/lr' + str(LEARNING_RATE), sess.graph)
        initial_step = model.global_step.eval()
        for index in range(initial_step, initial_step + num_train_steps):
            centers, targets = next(batch_gen)
            feed_dict={model.center_words: centers, model.target_words: targets}
            loss_batch, _, summary = sess.run([model.loss, model.optimizer, model.summary_op], 
                                              feed_dict=feed_dict)
            writer.add_summary(summary, global_step=index)
            total_loss += loss_batch
            if (index + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(index, total_loss / SKIP_STEP))
                total_loss = 0.0
                saver.save(sess, 'checkpoints/skip-gram', index)
        
        ####################
        # code to visualize the embeddings. uncomment the below to visualize embeddings
        # run "'tensorboard --logdir='processed'" to see the embeddings
        # final_embed_matrix = sess.run(model.embed_matrix)
        
        # # it has to variable. constants don't work here. you can't reuse model.embed_matrix
        # embedding_var = tf.Variable(final_embed_matrix[:1000], name='embedding')
        # sess.run(embedding_var.initializer)

        # config = projector.ProjectorConfig()
        # summary_writer = tf.summary.FileWriter('processed')

        # # add embedding to the config file
        # embedding = config.embeddings.add()
        # embedding.tensor_name = embedding_var.name
        
        # # link this tensor to its metadata file, in this case the first 500 words of vocab
        # embedding.metadata_path = 'processed/vocab_1000.tsv'

        # # saves a configuration file that TensorBoard will read during startup.
        # projector.visualize_embeddings(summary_writer, config)
        # saver_embed = tf.train.Saver([embedding_var])
        # saver_embed.save(sess, 'processed/model3.ckpt', 1) 
Developer ID: chiphuyen, Project: stanford-tensorflow-tutorials, Lines of code: 52, Source: 04_word2vec_visualize.py


Note: The utils.make_dir method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various programmers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.