

Python utils.make_dir Method Code Examples

This article collects and summarizes typical usage examples of the Python utils.make_dir method. If you are wrestling with questions such as: What exactly does utils.make_dir do? How is it called? What does real-world usage look like? then the hand-picked code examples below may help. You can also explore further usage examples from the utils module it belongs to.


The following presents 13 code examples of the utils.make_dir method, sorted by popularity by default.
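None of the snippets below includes the definition of make_dir itself, since each project ships its own utils module. As a rough sketch (an assumption for illustration, not the verbatim source of any project below), the helper typically wraps directory creation and swallows the "already exists" error; note that Example 9 creates 'checkpoints' and then 'checkpoints/arvix' in two separate calls, which suggests single-level creation via os.mkdir rather than recursive os.makedirs:

import os

def make_dir(path):
    """ Create a directory if there isn't one already (hypothetical sketch). """
    try:
        os.mkdir(path)  # creates one directory level; raises OSError if it already exists
    except OSError:
        pass  # directory already exists -- nothing to do

On Python 3.2+, os.makedirs(path, exist_ok=True) achieves the same effect, including intermediate directories, in a single call.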

Example 1: __init__

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def __init__(self, name=None, **kwargs):
        super(JDItemInfoSpider, self).__init__(name, **kwargs)
        self.url = kwargs.get("url")
        self.guid = kwargs.get('guid', 'guid')
        self.product_id = kwargs.get('product_id')
        # self.url = 'https://item.jd.com/11478178241.html'
        # self.url = 'https://item.jd.com/4142680.html'
        # self.url = 'https://item.jd.com/3133859.html'
        # self.url = 'https://item.jd.com/3995645.html'
        # self.product_id = 3995645
        self.log('product_id:%s' % self.product_id)
        self.item_table = 'item_%s' % self.product_id
        self.urls_key = '%s_urls' % self.product_id

        self.log_dir = 'log/%s' % self.product_id
        self.is_record_page = False

        self.sql = kwargs.get('sql')
        self.red = kwargs.get('red')

        if self.is_record_page:
            utils.make_dir(self.log_dir) 
Author: awolfly9, Project: jd_analysis, Lines: 24, Source: jd_item_info.py

Example 2: __init__

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def __init__(self, name=None, **kwargs):
        super(JDCommentSpider, self).__init__(name, **kwargs)
        self.url = kwargs.get("url")
        self.guid = kwargs.get('guid', 'guid')
        self.product_id = kwargs.get('product_id')
        # self.url = 'https://item.jd.com/11478178241.html'
        # self.url = 'https://item.jd.com/4142680.html'
        # self.url = 'https://item.jd.com/3133859.html'
        # self.url = 'https://item.jd.com/3995645.html'
        # self.product_id = 3995645
        self.log('product_id:%s' % self.product_id)
        self.item_table = 'item_%s' % self.product_id
        self.urls_key = '%s_urls' % self.product_id

        self.log_dir = 'log/%s' % self.product_id
        self.is_record_page = False

        self.sql = kwargs.get('sql')
        self.red = kwargs.get('red')
        proxymng.red = self.red
        
        if self.is_record_page:
            utils.make_dir(self.log_dir)

        self.init() 
Author: awolfly9, Project: jd_analysis, Lines: 27, Source: jd_comment.py

Example 3: load

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def load(self):
        '''
        Loads replay memory (attributes and arrays) into self, if possible.
        '''
        # Create out dir
        utils.make_dir(self.data_dir)
        try:
            print('Loading Memory data into Replay Memory Instance...')
            # Load property list
            d = json.load(open('{}/properties.json'.format(self.data_dir)))
            # Load numpy arrays
            self._load_arrays(self.data_dir, d['saved_size'])

            print('Finished loading Memory data into Replay Memory Instance!')

        except IOError as e:
            self.__init__(self.memory_size,
                          data_dir=self.data_dir, load_existing=False)
            print("I/O error({0}): {1}".format(e.errno, e.strerror))
            print("Couldn't find initial values for Replay Memory, instance init as new.") 
Author: diegoalejogm, Project: deep-q-learning, Lines: 22, Source: models.py

Example 4: init

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def init(self):
        self.dir_log = 'log/validator/%s' % self.name
        utils.make_dir(self.dir_log)

        self.sql.init_proxy_table(self.name) 
Author: awolfly9, Project: IPProxyTool, Lines: 7, Source: validator.py

Example 5: init

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def init(self):
        self.meta = {
            'download_timeout': self.timeout,
        }

        self.dir_log = 'log/proxy/%s' % self.name
        utils.make_dir(self.dir_log)
        self.sql.init_proxy_table(config.free_ipproxy_table) 
Author: awolfly9, Project: IPProxyTool, Lines: 10, Source: basespider.py

Example 6: save

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def save(self, size):
        '''
        Saves replay memory (attributes and arrays).
        '''
        # Create out dir
        utils.make_dir(self.data_dir)
        print('Saving Memory data into Replay Memory Instance...')
        # Save property dict
        with open('{}/properties.json'.format(self.data_dir), 'w') as f:
            json.dump(self.to_dict(size), f)
        # Save arrays
        self._save_arrays(self.data_dir, size) 
Author: diegoalejogm, Project: deep-q-learning, Lines: 14, Source: models.py

Example 7: plot_perm_ttest_results

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def plot_perm_ttest_results(events_id, inverse_method='dSPM', plot_type='scatter_plot'):
    print('plot_perm_ttest_results')
    all_data = defaultdict(dict)
    fsave_vertices = [np.arange(10242), np.arange(10242)]
    fs_pts = mne.vertex_to_mni(fsave_vertices, [0, 1], 'fsaverage', LOCAL_SUBJECTS_DIR) # 0 for lh
    for cond_id, cond_name, patient, hc, data in patients_hcs_conds_gen(events_id, True, inverse_method):
        all_data[patient][hc] = data[()]
    print(all_data.keys())
    for patient, pat_data in all_data.iteritems():
        print(patient)
        fol = op.join(LOCAL_ROOT_DIR, 'permutation_ttest_results', patient)
        utils.make_dir(fol)
        if op.isfile(op.join(fol, 'perm_ttest_points.npz')):
            d = np.load(op.join(fol, 'perm_ttest_points.npz'))
            if plot_type == 'scatter_plot':
                points, values = d['points'][()], d['values'][()]
            elif plot_type == 'pysurfer':
                vertices, vertives_values = d['vertices'][()], d['vertives_values'][()]
        else:
            points, values, vertices, vertives_values = calc_points(pat_data, fs_pts)
            np.savez(op.join(fol, 'perm_ttest_points'), points=points, values=values, vertices=vertices, vertives_values=vertives_values)
        max_vals = 8 # int(np.percentile([max(v) for v in values.values()], 70))
        print(max_vals)
        fol = op.join(fol, '{}_figures'.format(plot_type))
        utils.make_dir(fol)
        if plot_type == 'scatter_plot':
            scatter_plot_perm_ttest_results(points, values, fs_pts, max_vals, fol)
        elif plot_type == 'pysurfer':
            pysurfer_plot_perm_ttest_results(vertices, vertives_values, max_vals, fol) 
Author: pelednoam, Project: mmvt, Lines: 31, Source: meg_statistics.py

Example 8: create_local_folds

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def create_local_folds():
    for fol in ['eop', 'evo', 'inv', 'stc', 'stc_epochs', 'stc_morphed',
                'stc_epochs_morphed', 'stc_clusters', 'clusters_results',
                'permutation_ttest_results', 'results_for_blender']:
        utils.make_dir(op.join(LOCAL_ROOT_DIR, fol)) 
Author: pelednoam, Project: mmvt, Lines: 7, Source: meg_statistics.py

Example 9: main

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def main():
    vocab = (
            " $%'()+,-./0123456789:;=?ABCDEFGHIJKLMNOPQRSTUVWXYZ"
            "\\^_abcdefghijklmnopqrstuvwxyz{|}")
    seq = tf.placeholder(tf.int32, [None, None])
    temp = tf.placeholder(tf.float32)
    loss, sample, in_state, out_state = create_model(seq, temp, vocab)
    global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    optimizer = tf.train.AdamOptimizer(LR).minimize(loss, global_step=global_step)
    utils.make_dir('checkpoints')
    utils.make_dir('checkpoints/arvix')
    training(vocab, seq, loss, optimizer, global_step, temp, sample, in_state, out_state) 
Author: chiphuyen, Project: stanford-tensorflow-tutorials, Lines: 14, Source: 11_char_rnn_gist.py

Example 10: build_vocab

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def build_vocab(words, vocab_size):
    """ Build vocabulary of VOCAB_SIZE most frequent words """
    dictionary = dict()
    count = [('UNK', -1)]
    count.extend(Counter(words).most_common(vocab_size - 1))
    index = 0
    utils.make_dir('processed')
    with open('processed/vocab_1000.tsv', "w") as f:
        for word, _ in count:
            dictionary[word] = index
            if index < 1000:
                f.write(word + "\n")
            index += 1
    index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
    return dictionary, index_dictionary 
Author: chiphuyen, Project: stanford-tensorflow-tutorials, Lines: 17, Source: process_data.py

Example 11: main

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def main():
    with tf.variable_scope('input') as scope:
        # use a variable instead of a placeholder because we're training the initial image to make it
        # look like both the content image and the style image
        input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
    
    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    utils.make_dir('checkpoints')
    utils.make_dir('outputs')
    model = vgg_model.load_vgg(VGG_MODEL, input_image)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
    
    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    content_image = content_image - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image = style_image - MEAN_PIXELS

    model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model, 
                                                    input_image, content_image, style_image)
    ###############################
    ## TO DO: create optimizer
    ## model['optimizer'] = ...
    ###############################
    model['summary_op'] = _create_summary(model)

    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, input_image, initial_image) 
Author: chiphuyen, Project: stanford-tensorflow-tutorials, Lines: 29, Source: style_transfer.py

Example 12: main

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def main():
    with tf.variable_scope('input') as scope:
        # use a variable instead of a placeholder because we're training the initial image to make it
        # look like both the content image and the style image
        input_image = tf.Variable(np.zeros([1, IMAGE_HEIGHT, IMAGE_WIDTH, 3]), dtype=tf.float32)
    
    utils.download(VGG_DOWNLOAD_LINK, VGG_MODEL, EXPECTED_BYTES)
    utils.make_dir('checkpoints')
    utils.make_dir('outputs')
    model = vgg_model.load_vgg(VGG_MODEL, input_image)
    model['global_step'] = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')

    content_image = utils.get_resized_image(CONTENT_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    content_image = content_image - MEAN_PIXELS
    style_image = utils.get_resized_image(STYLE_IMAGE, IMAGE_HEIGHT, IMAGE_WIDTH)
    style_image = style_image - MEAN_PIXELS

    model['content_loss'], model['style_loss'], model['total_loss'] = _create_losses(model, 
                                                    input_image, content_image, style_image)
    ###############################
    ## TO DO: create optimizer
    model['optimizer'] = tf.train.AdamOptimizer(LR).minimize(model['total_loss'], 
                                                            global_step=model['global_step'])
    ###############################
    model['summary_op'] = _create_summary(model)

    initial_image = utils.generate_noise_image(content_image, IMAGE_HEIGHT, IMAGE_WIDTH, NOISE_RATIO)
    train(model, input_image, initial_image) 
Author: chiphuyen, Project: stanford-tensorflow-tutorials, Lines: 30, Source: style_transfer.py

Example 13: train_model

# Required import: import utils [as alias]
# Or: from utils import make_dir [as alias]
def train_model(model, batch_gen, num_train_steps, weights_fld):
    saver = tf.train.Saver() # defaults to saving all variables - in this case embed_matrix, nce_weight, nce_bias

    initial_step = 0
    utils.make_dir('checkpoints')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        ckpt = tf.train.get_checkpoint_state(os.path.dirname('checkpoints/checkpoint'))
        # if that checkpoint exists, restore from checkpoint
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        total_loss = 0.0 # we use this to calculate the average loss over the last SKIP_STEP steps
        writer = tf.summary.FileWriter('improved_graph/lr' + str(LEARNING_RATE), sess.graph)
        initial_step = model.global_step.eval()
        for index in range(initial_step, initial_step + num_train_steps):
            centers, targets = next(batch_gen)
            feed_dict = {model.center_words: centers, model.target_words: targets}
            loss_batch, _, summary = sess.run([model.loss, model.optimizer, model.summary_op], 
                                              feed_dict=feed_dict)
            writer.add_summary(summary, global_step=index)
            total_loss += loss_batch
            if (index + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(index, total_loss / SKIP_STEP))
                total_loss = 0.0
                saver.save(sess, 'checkpoints/skip-gram', index)
        
        ####################
        # code to visualize the embeddings. uncomment the lines below to visualize the embeddings
        # run "tensorboard --logdir='processed'" to see the embeddings
        # final_embed_matrix = sess.run(model.embed_matrix)
        
        # # it has to be a variable; constants don't work here. you can't reuse model.embed_matrix
        # embedding_var = tf.Variable(final_embed_matrix[:1000], name='embedding')
        # sess.run(embedding_var.initializer)

        # config = projector.ProjectorConfig()
        # summary_writer = tf.summary.FileWriter('processed')

        # # add embedding to the config file
        # embedding = config.embeddings.add()
        # embedding.tensor_name = embedding_var.name
        
        # # link this tensor to its metadata file, in this case the first 1000 words of the vocab
        # embedding.metadata_path = 'processed/vocab_1000.tsv'

        # # saves a configuration file that TensorBoard will read during startup.
        # projector.visualize_embeddings(summary_writer, config)
        # saver_embed = tf.train.Saver([embedding_var])
        # saver_embed.save(sess, 'processed/model3.ckpt', 1) 
Author: chiphuyen, Project: stanford-tensorflow-tutorials, Lines: 52, Source: 04_word2vec_visualize.py


Note: The utils.make_dir method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; the source code copyright belongs to the original authors. Please refer to each project's License before distributing or using the code. Do not reproduce without permission.