

Python config.LOG_DIR attribute: code examples

This article collects typical usage examples of the config.LOG_DIR attribute in Python. If you are wondering how config.LOG_DIR is used in practice, or are looking for concrete examples of it, the selected code samples below may help. You can also explore further usage examples of the config module to which this attribute belongs.


The following presents 15 code examples of the config.LOG_DIR attribute, sorted by popularity by default.
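All of the examples below follow the same pattern: a project-level config module exposes a LOG_DIR path, and a small logging helper is asked to create a log file inside that directory. Here is a minimal, self-contained sketch of that pattern; the LOG_DIR value and the _get_logger body are illustrative assumptions, not the original projects' implementations.

import os
import logging

# Illustrative stand-in for config.LOG_DIR: a "logs" directory next to this file.
LOG_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "logs")

def _get_logger(logdir, logname, loglevel=logging.INFO):
    # Sketch of the helper the examples call as logging_utils._get_logger(config.LOG_DIR, logname).
    os.makedirs(logdir, exist_ok=True)
    handler = logging.FileHandler(os.path.join(logdir, logname))
    handler.setFormatter(logging.Formatter("[%(asctime)s] %(levelname)s: %(message)s"))
    logger = logging.getLogger(logname)
    logger.setLevel(loglevel)
    logger.addHandler(handler)
    return logger

if __name__ == "__main__":
    logger = _get_logger(LOG_DIR, "demo.log")
    logger.info("feature-generation log files land in %s", LOG_DIR)

In the examples that follow, config.LOG_DIR plays exactly this role: it is the directory handed to logging_utils._get_logger (or joined with os.path.join) so that every feature-generation or training run writes its own timestamped log file.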

Example 1: main

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def main():
    logname = "generate_feature_wordnet_similarity_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    #### NOTE: use data BEFORE STEMMING
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED)

    # WordNet_Lch_Similarity and WordNet_Wup_Similarity are not used in final submission
    generators = [
        WordNet_Path_Similarity,
        WordNet_Lch_Similarity,
        WordNet_Wup_Similarity,
    ][:1]
    obs_fields_list = []
    target_fields_list = []
    # only search_term and product_title are used in final submission
    obs_fields_list.append( ["search_term", "search_term_alt", "search_term_auto_corrected"][:1] )
    target_fields_list.append( ["product_title", "product_description", "product_attribute"][:1] )
    # double aggregation
    aggregation_mode_prev = ["mean", "max", "min", "median"]
    aggregation_mode = ["mean", "std", "max", "min", "median"]
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for generator in generators:
            param_list = [aggregation_mode_prev, aggregation_mode]
            pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
            pf.go() 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 27 | Source file: feature_wordnet_similarity.py

Example 2: run_lsa_ngram

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def run_lsa_ngram():
    logname = "generate_feature_lsa_ngram_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    dfAll.drop(["product_attribute_list"], inplace=True, axis=1)

    generators = [LSA_Word_Ngram, LSA_Char_Ngram]
    ngrams_list = [[1,2,3], [2,3,4,5]]
    ngrams_list = [[3], [4]]  # overrides the list above: only word 3-grams and char 4-grams are generated
    # obs_fields = ["search_term", "search_term_alt", "search_term_auto_corrected", "product_title", "product_description"]
    obs_fields = ["search_term", "product_title", "product_description"]
    for generator,ngrams in zip(generators, ngrams_list):
        for ngram in ngrams:
            param_list = [ngram, config.SVD_DIM, config.SVD_N_ITER]
            sf = StandaloneFeatureWrapper(generator, dfAll, obs_fields, param_list, config.FEAT_DIR, logger)
            sf.go() 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 18 | Source file: feature_vector_space.py

Example 3: run_lsa_ngram_cooc

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def run_lsa_ngram_cooc():
    logname = "generate_feature_lsa_ngram_cooc_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    dfAll.drop(["product_attribute_list"], inplace=True, axis=1)

    generators = [LSA_Word_Ngram_Cooc]
    obs_ngrams = [1, 2]
    target_ngrams = [1, 2]
    obs_fields_list = []
    target_fields_list = []
    obs_fields_list.append( ["search_term", "search_term_alt", "search_term_auto_corrected"][:1] )
    target_fields_list.append( ["product_title", "product_description"][:1] )
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for obs_ngram in obs_ngrams:
            for target_ngram in target_ngrams:
                for generator in generators:
                    param_list = [obs_ngram, target_ngram, config.SVD_DIM, config.SVD_N_ITER]
                    pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
                    pf.go() 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 22 | Source file: feature_vector_space.py

Example 4: run_lsa_ngram_pair

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def run_lsa_ngram_pair():
    logname = "generate_feature_lsa_ngram_pair_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    dfAll.drop(["product_attribute_list"], inplace=True, axis=1)

    generators = [LSA_Word_Ngram_Pair]
    ngrams = [1, 2]
    obs_fields_list = []
    target_fields_list = []
    obs_fields_list.append( ["search_term", "search_term_alt", "search_term_auto_corrected"][:1] )
    target_fields_list.append( ["product_title", "product_description"] )
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for ngram in ngrams:
            for generator in generators:
                param_list = [ngram, config.SVD_DIM, config.SVD_N_ITER]
                pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
                pf.go()


# memory error (use feature_tsne.R instead) 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 23 | Source file: feature_vector_space.py

Example 5: run_lsa_ngram_cosinesim

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def run_lsa_ngram_cosinesim():
    logname = "generate_feature_lsa_ngram_cosinesim_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    dfAll.drop(["product_attribute_list"], inplace=True, axis=1)

    generators = [LSA_Word_Ngram_CosineSim, LSA_Char_Ngram_CosineSim]
    ngrams_list = [[1,2,3], [2,3,4,5]]
    ngrams_list = [[3], [4]]  # overrides the list above: only word 3-grams and char 4-grams are generated
    obs_fields_list = []
    target_fields_list = []
    obs_fields_list.append( ["search_term", "search_term_alt", "search_term_auto_corrected"][:1] )
    target_fields_list.append( ["product_title", "product_description", "product_attribute"] )
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for generator,ngrams in zip(generators, ngrams_list):
            for ngram in ngrams:
                param_list = [ngram, config.SVD_DIM, config.SVD_N_ITER]
                pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
                pf.go() 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 21 | Source file: feature_vector_space.py

Example 6: run_char_dist_sim

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def run_char_dist_sim():
    logname = "generate_feature_char_dist_sim_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    dfAll.drop(["product_attribute_list"], inplace=True, axis=1)
    
    generators = [CharDistribution_Ratio, CharDistribution_CosineSim, CharDistribution_KL]
    obs_fields_list = []
    target_fields_list = []
    obs_fields_list.append( ["search_term", "search_term_alt", "search_term_auto_corrected"][:1] )
    target_fields_list.append( ["product_title", "product_description", "product_attribute"] )
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for generator in generators:
            param_list = []
            pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
            pf.go() 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 18 | Source file: feature_vector_space.py

Example 7: __init__

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def __init__(self, feature_list, feature_name, feature_suffix=".csv",
                feature_level=2, meta_feature_dict={}, corr_threshold=0):
        self.feature_name = feature_name
        self.feature_list = feature_list
        self.feature_suffix = feature_suffix
        self.feature_level = feature_level
        # for meta features
        self.meta_feature_dict = meta_feature_dict
        self.corr_threshold = corr_threshold
        self.feature_names_basic = []
        self.feature_names_cv = []
        self.feature_names = []
        self.has_basic = 1 if self.meta_feature_dict else 0
        logname = "feature_combiner_%s_%s.log"%(feature_name, time_utils._timestamp())
        self.logger = logging_utils._get_logger(config.LOG_DIR, logname)
        # splitter_level2, splitter_level3, and n_iter are module-level variables
        # defined elsewhere in feature_combiner.py (not shown in this excerpt)
        if self.feature_level == 2:
            self.splitter = splitter_level2
        elif self.feature_level == 3:
            self.splitter = splitter_level3
        self.n_iter = n_iter
        self.splitter_prev = [0]*self.n_iter 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 23 | Source file: feature_combiner.py

Example 8: run_ngram_jaccard

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def run_ngram_jaccard():
    logname = "generate_feature_ngram_jaccard_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)

    generators = [JaccardCoef_Ngram, DiceDistance_Ngram]
    obs_fields_list = []
    target_fields_list = []
    obs_fields_list.append( ["search_term", "search_term_product_name", "search_term_alt", "search_term_auto_corrected"][:2] )
    target_fields_list.append( ["product_title", "product_title_product_name", "product_description", "product_attribute", "product_brand", "product_color"] )
    ngrams = [1,2,3,12,123][:3]
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for generator in generators:
            for ngram in ngrams:
                param_list = [ngram]
                pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
                pf.go() 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 19 | Source file: feature_distance.py

Example 9: run_compression_distance

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def run_compression_distance():
    logname = "generate_feature_compression_distance_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)

    obs_fields_list = []
    target_fields_list = []
    obs_fields_list.append( ["search_term", "search_term_product_name", "search_term_alt", "search_term_auto_corrected"][:2] )
    target_fields_list.append( ["product_title", "product_title_product_name", "product_description", "product_attribute", "product_brand", "product_color"] )
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        param_list = []
        pf = PairwiseFeatureWrapper(CompressionDistance, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
        pf.go()
        for ngram in ngrams:
            param_list = [ngram, aggregation_mode_prev, aggregation_mode]
            pf = PairwiseFeatureWrapper(CompressionDistance_Ngram, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
            pf.go() 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 19 | Source file: feature_distance.py

Example 10: main

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def main(options):
    if options.epoch:
        time_str = datetime.datetime.now().isoformat()
        logname = "Eval_[Model@%s]_[Data@%s]_%s.log" % (options.model_name,
                options.data_name, time_str)
        logger = logging_utils._get_logger(config.LOG_DIR, logname)
    else:
        time_str = datetime.datetime.now().isoformat()
        logname = "Final_[Model@%s]_[Data@%s]_%s.log" % (options.model_name,
                options.data_name, time_str)
        logger = logging_utils._get_logger(config.LOG_DIR, logname)
        # logger = logging.getLogger()
        # logging.basicConfig(format='[%(asctime)s] %(levelname)s: %(message)s', level=logging.INFO)
    params_dict = param_space_dict[options.model_name]
    task = Task(options.model_name, options.data_name, options.runs, params_dict, logger)
    if options.save:
        task.save()
    else:
        if options.epoch:
            task.refit()
        else:
            task.evaluate(options.full) 
Developer: billy-inn | Project: NFETC | Lines of code: 24 | Source file: eval.py

Example 11: __init__

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def __init__(self, test_weight):
        log_dir = os.path.join(cfg.LOG_DIR, 'test')
        test_weight_path = os.path.join(cfg.WEIGHTS_DIR, test_weight)

        with tf.name_scope('input'):
            input_data = tf.placeholder(dtype=tf.float32, name='input_data')
            training = tf.placeholder(dtype=tf.bool, name='training')
        _, _, _, pred_sbbox, pred_mbbox, pred_lbbox = YOLOV3(training).build_nework(input_data)
        with tf.name_scope('summary'):
            tf.summary.FileWriter(log_dir).add_graph(tf.get_default_graph())
        self.__sess = tf.Session()
        net_vars = tf.get_collection('YoloV3')
        saver = tf.train.Saver(net_vars)
        saver.restore(self.__sess, test_weight_path)
        super(Yolo_test, self).__init__(self.__sess, input_data, training, pred_sbbox, pred_mbbox, pred_lbbox)
        print("input_data.name=", input_data.name)
        print("pred_sbbox=", pred_sbbox.name)
        print("pred_mbbox=", pred_mbbox.name)
        print("pred_lbbox=", pred_lbbox.name) 
Developer: PINTO0309 | Project: PINTO_model_zoo | Lines of code: 21 | Source file: test.py

Example 12: _create_feature_conf

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def _create_feature_conf(level, topN, outfile):
    log_folder = "%s/level%d_models"%(config.LOG_DIR, level)
    feature_list = get_model_list(log_folder, topN)
    res = header_pattern%(__file__, level, int(topN), outfile)
    for feature in feature_list:
        res += '"%s",\n'%feature
    res += "]\n"
    with open(os.path.join(config.FEAT_CONF_DIR, outfile), "w") as f:
        f.write(res) 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 11 | Source file: get_stacking_feature_conf.py

Example 13: run_count

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def run_count():
    logname = "generate_feature_first_last_ngram_count_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)

    generators = [
        FirstIntersectCount_Ngram, 
        LastIntersectCount_Ngram, 
        FirstIntersectRatio_Ngram, 
        LastIntersectRatio_Ngram, 
    ]

    obs_fields_list = []
    target_fields_list = []
    ## query in document
    obs_fields_list.append( ["search_term", "search_term_product_name", "search_term_alt", "search_term_auto_corrected"][:2] )
    target_fields_list.append( ["product_title", "product_title_product_name", "product_description", "product_attribute", "product_brand", "product_color"] )
    ## document in query
    obs_fields_list.append( ["product_title", "product_title_product_name", "product_description", "product_attribute", "product_brand", "product_color"] )
    target_fields_list.append( ["search_term", "search_term_product_name", "search_term_alt", "search_term_auto_corrected"][:2] )
    ngrams = [1,2,3,12,123][:3]
    for obs_fields, target_fields in zip(obs_fields_list, target_fields_list):
        for generator in generators:
            for ngram in ngrams:
                param_list = [ngram]
                pf = PairwiseFeatureWrapper(generator, dfAll, obs_fields, target_fields, param_list, config.FEAT_DIR, logger)
                pf.go() 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 29 | Source file: feature_first_last_ngram.py

Example 14: main

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def main():
    logname = "generate_feature_group_distance_%s.log"%time_utils._timestamp()
    logger = logging_utils._get_logger(config.LOG_DIR, logname)
    dfAll = pkl_utils._load(config.ALL_DATA_LEMMATIZED_STEMMED)
    # TRAIN_SIZE (the number of training rows) is defined elsewhere in the original module
    dfTrain = dfAll.iloc[:TRAIN_SIZE].copy()

    ## run python3 splitter.py first
    split = pkl_utils._load("%s/splits_level1.pkl"%config.SPLIT_DIR)
    n_iter = len(split)

    relevances_complete = [1, 1.25, 1.33, 1.5, 1.67, 1.75, 2, 2.25, 2.33, 2.5, 2.67, 2.75, 3]
    relevances = [1, 1.33, 1.67, 2, 2.33, 2.67, 3]
    ngrams = [1]
    obs_fields = ["search_term"]
    target_fields = ["product_title", "product_description"]
    aggregation_mode = ["mean", "std", "max", "min", "median"]

    ## for cv
    for i in range(n_iter):
        trainInd, validInd = split[i][0], split[i][1]
        dfTrain2 = dfTrain.iloc[trainInd].copy()
        sub_feature_dir = "%s/Run%d" % (config.FEAT_DIR, i+1)

        for target_field in target_fields:
            for relevance in relevances:
                for ngram in ngrams:
                    param_list = [dfAll["id"], dfTrain2, target_field, relevance, ngram, aggregation_mode]
                    pf = PairwiseFeatureWrapper(GroupRelevance_Ngram_Jaccard, dfAll, obs_fields, [target_field], param_list, sub_feature_dir, logger)
                    pf.go()

    ## for all
    sub_feature_dir = "%s/All" % (config.FEAT_DIR)
    for target_field in target_fields:
        for relevance in relevances:
            for ngram in ngrams:
                param_list = [dfAll["id"], dfTrain, target_field, relevance, ngram, aggregation_mode]
                pf = PairwiseFeatureWrapper(GroupRelevance_Ngram_Jaccard, dfAll, obs_fields, [target_field], param_list, sub_feature_dir, logger)
                pf.go() 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 40 | Source file: feature_group_distance.py

Example 15: __init__

# Required import: import config [as alias]
# Or: from config import LOG_DIR [as alias]
def __init__(self, model_folder, model_list, subm_prefix, 
                weight_opt_max_evals=10, w_min=-1., w_max=1., 
                inst_subsample=0.5, inst_subsample_replacement=False, 
                inst_splitter=None,
                model_subsample=1.0, model_subsample_replacement=True,
                bagging_size=10, init_top_k=5, epsilon=0.00001, 
                multiprocessing=False, multiprocessing_num_cores=1,
                enable_extreme=True, random_seed=0):

        self.model_folder = model_folder
        self.model_list = model_list
        self.subm_prefix = subm_prefix
        self.weight_opt_max_evals = weight_opt_max_evals
        self.w_min = w_min
        self.w_max = w_max
        assert inst_subsample > 0 and inst_subsample <= 1.
        self.inst_subsample = inst_subsample
        self.inst_subsample_replacement = inst_subsample_replacement
        self.inst_splitter = inst_splitter
        assert model_subsample > 0
        assert (type(model_subsample) == int) or (model_subsample <= 1.)
        self.model_subsample = model_subsample
        self.model_subsample_replacement = model_subsample_replacement
        self.bagging_size = bagging_size
        self.init_top_k = init_top_k
        self.epsilon = epsilon
        self.multiprocessing = multiprocessing
        self.multiprocessing_num_cores = multiprocessing_num_cores
        self.enable_extreme = enable_extreme
        self.random_seed = random_seed
        logname = "ensemble_selection_%s.log"%time_utils._timestamp()
        self.logger = logging_utils._get_logger(config.LOG_DIR, logname)
        self.n_models = len(self.model_list) 
Developer: ChenglongChen | Project: kaggle-HomeDepot | Lines of code: 35 | Source file: extreme_ensemble_selection.py


Note: the config.LOG_DIR attribute examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's license before redistributing or reusing the code; do not reproduce without permission.