

Python Fields.field_id Method Code Examples

This article collects typical usage examples of the Python method bigml.fields.Fields.field_id. If you are wondering what Fields.field_id does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the containing class, bigml.fields.Fields.


Below are 10 code examples of the Fields.field_id method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
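
Before the examples, here is a minimal sketch of the basic call pattern. It is built on a small hand-written fields map (the field ids and names are hypothetical, for illustration only), so it runs without an API connection. field_id resolves either a field name or a column number to the internal field id and, as the examples below suggest, raises ValueError for keys it cannot resolve in the library versions these snippets target.

from bigml.fields import Fields

# hand-built fields map: field id -> field descriptor (hypothetical values)
fields_map = {
    "000000": {"name": "sepal length", "optype": "numeric", "column_number": 0},
    "000001": {"name": "species", "optype": "categorical", "column_number": 1},
}
fields = Fields(fields_map)

print(fields.field_id("species"))  # lookup by field name -> "000001"
print(fields.field_id(0))          # lookup by column number -> "000000"

try:
    fields.field_id("no_such_field")
except ValueError as exc:
    # unresolvable names/columns surface as ValueError in these examples
    print("unresolved field: %s" % exc)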

Example 1: create_kfold_datasets_file

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]
def create_kfold_datasets_file(args, api, common_options, resume=False):
    """Create the kfold dataset resources and store their ids in a file
       one per line

    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file, console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW
    # retrieve dataset
    dataset_id = bigml.api.get_dataset_id(args.dataset)
    if dataset_id:
        dataset = api.check_resource(dataset_id, api.get_dataset)
        # check that kfold_field is unique
        fields = Fields(dataset, {"objective_field": args.objective_field,
                                  "objective_field_present": True})
        objective_id = fields.field_id(fields.objective_field)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args, kfold_field_name,
                                                        objective_id,
                                                        resume=resume) 
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      fields.objective_field,
                                                      kfold_field_name,
                                                      common_options,
                                                      resume=resume)
        return datasets_file, fields.field_column_number(objective_id), resume
    return None, None, None    
Developer: chunhungChou, Project: bigmler, Lines: 33, Source: k_fold_cv.py
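
Example 1 resolves the objective field to its id with fields.field_id(fields.objective_field) and later reports its position via field_column_number. Here is a minimal sketch of that id/column round-trip, again over a hypothetical hand-built fields map and assuming the keyword-argument form of the Fields constructor used in Examples 3 and 4:

from bigml.fields import Fields

# hypothetical fields map for illustration
fields_map = {
    "000000": {"name": "age", "optype": "numeric", "column_number": 0},
    "000001": {"name": "diagnosis", "optype": "categorical", "column_number": 1},
}
# objective_field given as a column number, as in Examples 3 and 4
fields = Fields(fields_map, objective_field=1, objective_field_present=True)

objective_id = fields.field_id(fields.objective_field)  # column 1 -> "000001"
column = fields.field_column_number(objective_id)       # "000001" -> 1
print("%s -> column %s" % (objective_id, column))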

Example 2: best_first_search

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]
def best_first_search(datasets_file, api, args, common_options,
                      staleness=None, penalty=None, objective_name=None,
                      resume=False):
    """Selecting the fields to be used in the model construction

    """
    counter = 0
    loop_counter = 0
    features_file = os.path.normpath(os.path.join(args.output_dir,
                                                  FEATURES_LOG))
    with open(features_file, u.open_mode("w")) as features_handler:
        features_writer = csv.writer(features_handler, lineterminator="\n")
        features_writer.writerow([
            "step", "state", "score", "metric_value", "best_score"])
        features_handler.flush()
        if staleness is None:
            staleness = DEFAULT_STALENESS
        if penalty is None:
            penalty = DEFAULT_PENALTY
        # retrieving the first dataset in the file
        try:
            with open(datasets_file, u.open_mode("r")) as datasets_handler:
                dataset_id = datasets_handler.readline().strip()
        except IOError, exc:
            sys.exit("Could not read the generated datasets file: %s" %
                     str(exc))
        try:
            stored_dataset = u.storage_file_name(args.output_dir, dataset_id)
            with open(stored_dataset, u.open_mode("r")) as dataset_handler:
                dataset = json.loads(dataset_handler.read())
        except IOError:
            dataset = api.check_resource(dataset_id,
                                         query_string=ALL_FIELDS_QS)
        # initial feature set
        fields = Fields(dataset)
        excluded_features = ([] if args.exclude_features is None else
                             args.exclude_features.split(
                                 args.args_separator))
        try:
            excluded_ids = [fields.field_id(feature) for
                            feature in excluded_features]
            objective_id = fields.field_id(objective_name)
        except ValueError, exc:
            sys.exit(exc)
Developer: ASA-Pitts, Project: bigmler, Lines: 46, Source: k_fold_cv.py
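
Examples 2 and 9 turn a user-supplied, separator-joined feature list into field ids in bulk; the pattern is simply a list comprehension over field_id. A minimal sketch with hypothetical field names:

from bigml.fields import Fields

# hypothetical fields map for illustration
fields_map = {
    "000000": {"name": "age", "optype": "numeric", "column_number": 0},
    "000001": {"name": "weight", "optype": "numeric", "column_number": 1},
    "000002": {"name": "diagnosis", "optype": "categorical", "column_number": 2},
}
fields = Fields(fields_map)

excluded_features = "age,weight".split(",")  # e.g. parsed from a CLI option
excluded_ids = [fields.field_id(name) for name in excluded_features]
print(excluded_ids)  # -> ['000000', '000001']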

Example 3: create_kfold_datasets_file

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]
def create_kfold_datasets_file(args, api, common_options, resume=False):
    """Create the kfold dataset resources and store their ids in a file
       one per line

    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file, console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW
    # retrieve dataset
    dataset_id = bigml.api.get_dataset_id(args.dataset)
    if dataset_id:
        dataset = api.check_resource(dataset_id)
        try:
            args.objective_field = int(args.objective_field)
        except (TypeError, ValueError):
            pass
        # if the user provided no objective field, try to use the one in the
        # dataset
        if args.objective_field is None:
            try:
                args.objective_field = dataset['object'][
                    'objective_field']['column_number']
            except KeyError:
                pass
        # check that kfold_field is unique
        fields = Fields(dataset, objective_field=args.objective_field,
                        objective_field_present=True)
        try:
            objective_id = fields.field_id(fields.objective_field)
            objective_name = fields.field_name(objective_id)
        except ValueError, exc:
            sys.exit(exc)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args, kfold_field_name,
                                                        objective_id,
                                                        resume=resume)
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      objective_name,
                                                      common_options,
                                                      resume=resume)
        return datasets_file, objective_name, resume
Developer: cheesinglee, Project: bigmler, Lines: 47, Source: k_fold_cv.py

Example 4: create_kfold_datasets_file

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]
def create_kfold_datasets_file(args, api, common_options, resume=False):
    """Create the kfold dataset resources and store their ids in a file
       one per line

    """
    message = ('Creating the kfold datasets............\n')
    u.log_message(message, log_file=session_file, console=args.verbosity)
    if args.output_dir is None:
        args.output_dir = a.NOW

    csv_properties = {}
    fields = None
    dataset = None
    datasets = []
    if args.dataset_file:
        # dataset is retrieved from the contents of the given local JSON file
        model_dataset, csv_properties, fields = u.read_local_resource(
            args.dataset_file,
            csv_properties=csv_properties)
        if not args.datasets:
            datasets = [model_dataset]
            dataset = model_dataset
        else:
            datasets = u.read_datasets(args.datasets)
        dataset_id = dataset['resource']
    elif args.dataset:
        dataset_id = bigml.api.get_dataset_id(args.dataset)
        datasets = [dataset_id]
    elif args.dataset_ids:
        datasets = args.dataset_ids
        dataset_id = datasets[0]

    if dataset_id:
        if not dataset:
            dataset = api.check_resource(dataset_id,
                                         query_string=ALL_FIELDS_QS)
        try:
            args.objective_field = int(args.objective_field)
        except (TypeError, ValueError):
            pass
        # if the user provided no objective field, try to use the one in the
        # dataset
        if args.objective_field is None:
            try:
                args.objective_field = dataset['object'][
                    'objective_field']['column_number']
            except KeyError:
                pass
        # check that kfold_field is unique
        fields = Fields(dataset, objective_field=args.objective_field,
                        objective_field_present=True)
        if args.random_fields:
            default_candidates_limits(args, fields)
        try:
            objective_id = fields.field_id(fields.objective_field)
            objective_name = fields.field_name(objective_id)
        except ValueError, exc:
            sys.exit(exc)
        kfold_field_name = avoid_duplicates(DEFAULT_KFOLD_FIELD, fields)
        # create jsons to generate partial datasets
        selecting_file_list, resume = create_kfold_json(args, kfold_field_name,
                                                        objective_id,
                                                        resume=resume)
        # generate test datasets
        datasets_file, resume = create_kfold_datasets(dataset_id, args,
                                                      selecting_file_list,
                                                      common_options,
                                                      resume=resume)
        return datasets_file, objective_name, resume
Developer: weaver-viii, Project: bigmler, Lines: 71, Source: k_fold_cv.py

Example 5: str

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]
     sys.exit("Could not read the generated datasets file: %s" %
              str(exc))
 try:
     stored_dataset = u.storage_file_name(args.output_dir, dataset_id)
     with open(stored_dataset, u.open_mode("r")) as dataset_handler:
         dataset = json.loads(dataset_handler.read())
 except IOError:
     dataset = api.check_resource(dataset_id,
                                  query_string=ALL_FIELDS_QS)
 # initial feature set
 fields = Fields(dataset)
 excluded_features = ([] if args.exclude_features is None else
                      args.exclude_features.split(
                          args.args_separator))
 try:
     excluded_ids = [fields.field_id(feature) for
                     feature in excluded_features]
     objective_id = fields.field_id(objective_name)
 except ValueError, exc:
     sys.exit(exc)
 field_ids = [field_id for field_id in fields.preferred_fields()
              if field_id != objective_id and
              not field_id in excluded_ids]
 # headers are extended with a column per field
 fields_names = [fields.field_name(field_id) for field_id in field_ids]
 features_header.extend(fields_names)
 features_writer.writerow(features_header)
 initial_state = [False for field_id in field_ids]
 open_list = [(initial_state, - float('inf'), -float('inf'), 0)]
 closed_list = []
 best_state, best_score, best_metric_value, best_counter = open_list[0]
Developer: weaver-viii, Project: bigmler, Lines: 33, Source: k_fold_cv.py

Example 6: compute_output

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]

#......... some code omitted here .........
                        source['object']['source_parser']['locale'])
        source_file = open(path + '/source', 'w', 0)
        source_file.write("%s\n" % source['resource'])
        source_file.write("%s\n" % source['object']['name'])
        source_file.flush()
        source_file.close()

    # If a source is provided, we retrieve it.
    elif args.source:
        message = u.dated("Retrieving source. %s\n" %
                          u.get_url(args.source, api))
        u.log_message(message, log_file=session_file, console=args.verbosity)
        source = api.get_source(args.source)

    # If we already have source, we check that is finished and extract the
    # fields, and update them if needed.
    if source:
        if source['object']['status']['code'] != bigml.api.FINISHED:
            message = u.dated("Retrieving source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.check_resource(source, api.get_source)
        csv_properties = {'missing_tokens':
                          source['object']['source_parser']['missing_tokens'],
                          'data_locale':
                          source['object']['source_parser']['locale']}

        fields = Fields(source['object']['fields'], **csv_properties)
        update_fields = {}
        if field_attributes:
            for (column, value) in field_attributes.iteritems():
                update_fields.update({
                    fields.field_id(column): value})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

        update_fields = {}
        if types:
            for (column, value) in types.iteritems():
                update_fields.update({
                    fields.field_id(column): {'optype': value}})
            message = u.dated("Updating source. %s\n" %
                              u.get_url(source, api))
            u.log_message(message, log_file=session_file,
                          console=args.verbosity)
            source = api.update_source(source, {"fields": update_fields})

    if (training_set or args.source or (args.evaluate and test_set)):
        if resume:
            resume, args.dataset = u.checkpoint(u.is_dataset_created, path,
                                                bigml.api,
                                                debug=args.debug)
            if not resume:
                message = u.dated("Dataset not found. Resuming.\n")
                u.log_message(message, log_file=session_file,
                              console=args.verbosity)
    # If we have a source but not dataset or model has been provided, we
    # create a new dataset if the no_dataset option isn't set up. Also
    # if evaluate is set and test_set has been provided.
    if ((source and not args.dataset and not args.model and not model_ids and
            not args.no_dataset) or
            (args.evaluate and args.test_set and not args.dataset)):
Developer: BigData-Tools, Project: bigmler, Lines: 70, Source: bigmler.py

Example 7: SymptomInsert

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]
    def SymptomInsert(self, model):

        session = model.key.get()

        if session is None:
            raise endpoints.NotFoundException('Session not found.')             

        if session.symptoms is None :
            session.symptoms = Symptoms()

        for s in session.symptoms.items :
            if s.name == model.name :
                s.value = model.value
                break
        else :
            symptom = Symptom(name=model.name, value=model.value)
            session.symptoms.items.append(symptom)

        logging.debug('starting prediction')        

        p = {}
        
        for symptom in session.symptoms.items:
            p[symptom.name] = symptom.value
                    
        bigml_local_model = bigml_model.get_local_model()
        
        prediction = bigml_local_model.predict(p, add_confidence=True, add_path=True, add_distribution=True, add_count=True, add_next=True)

        prediction_all = bigml_local_model.predict(p, multiple=5)
        
        if prediction['next'] is not None :
            logging.debug('got fields %s' % bigml_local_model.fields)

            fields = Fields(bigml_local_model.fields)
            field_id = fields.field_id(prediction['next'])
            field = bigml_local_model.fields[field_id]

            if 'label' in field :
                label = field['label']
            else :
                label = field['name']

            if 'description' in field :
                description = field['description']
            else :
                description = ''

            if 'categories' in field['summary'] :
                
                cat = []
                for c in field['summary']['categories'] :
                    cat.append(c[0])
                
                session.next = Question(label=label, description=description, type=field['optype'], categories=cat)
            else:
                session.next = Question(label=label, description=description, type=field['optype'])

        else :
            session.next = None
            
        session.outcome = Outcome(name=prediction['prediction'], confidence=str(prediction['confidence']), full=prediction_all)        
        session.put()
        
        return session
Developer: HubertFoxchase, Project: health-expert, Lines: 67, Source: api2.py

Example 8: main

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]
def main(args=sys.argv[1:]):
    """Parses command-line parameters and calls the actual main function.

    """
    parser = argparse.ArgumentParser(
        description="Dataset analysis",
        epilog="BigML, Inc")

    # source with activity data
    parser.add_argument('--source',
                        action='store',
                        dest='source',
                        default=None,
                        help="Full path to file")

    # create private links or not
    parser.add_argument('--share',
                        action='store_true',
                        default=False,
                        help="Share created resources or not")

    # weight models or not
    parser.add_argument('--balance',
                        action='store_true',
                        default=False,
                        help="Weight model or not")

    args = parser.parse_args(args)

    if not args.source:
        sys.exit("You need to provide a valid path to a source")

    api = BigML()

    name = "Sean's activity"

    log("Creating source...")
    source_args = {'name': name}
    source = api.create_source(args.source, source_args)
    if not api.ok(source):
        sys.exit("Source isn't ready...")

    log("Creating dataset...")
    dataset = api.create_dataset(source)
    if not api.ok(dataset):
        sys.exit("Dataset isn't ready...")

    log("Transforming dataset...")
    # Extends dataset with new field for previous activity, previous duration,
    # start day, and start hour. Removes first column, start, and end fields.
    new_dataset_args = {
        'name': name,
        'new_fields': new_fields(),
        'all_but': excluded_fields()}
    new_dataset = api.create_dataset(dataset, new_dataset_args)
    if not api.ok(new_dataset):
        sys.exit("Dataset isn't ready...")

    # Set objective field to activity
    fields = Fields(new_dataset['object']['fields'])
    objective_id = fields.field_id('activity')
    new_dataset_args = {
        'objective_field': {'id': objective_id}}
    new_dataset = api.update_dataset(new_dataset, new_dataset_args)

    # Create training and test set for evaluation
    log("Splitting dataset...")
    training, test = train_test_split(api, new_dataset)

    log("Creating a model using the training dataset...")
    model_args = {
        'objective_field': objective_id,
        'balance_objective': args.balance,
        'name': training['object']['name']}
    model = api.create_model(training, model_args)
    if not api.ok(model):
        sys.exit("Model isn't ready...")

    # Creating an evaluation
    log("Evaluating model against the test dataset...")
    eval_args = {
        'name': name + ' - 80% vs 20%'}
    evaluation = api.create_evaluation(model, test, eval_args)
    if not api.ok(evaluation):
        sys.exit("Evaluation isn't ready...")

    log("Creating model for the full dataset...")
    model = api.create_model(new_dataset, model_args)
    if not api.ok(model):
        sys.exit("Model isn't ready...")

    # Create private links
    if args.share:
        log("Sharing resources...")
        dataset_private_link = share_dataset(api, new_dataset)
        model_private_link = share_model(api, model)
        evaluation_private_link = share_evaluation(api, evaluation)
        log(dataset_private_link)
        log(model_private_link)
        log(evaluation_private_link)
Developer: aficionado, Project: nextactivity, Lines: 102, Source: next_activity.py

Example 9: best_first_search

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]
def best_first_search(datasets_file, api, args, common_options,
                      staleness=None, penalty=None, objective_name=None,
                      resume=False):
    """Selecting the fields to be used in the model construction

    """
    counter = 0
    loop_counter = 0
    features_file = os.path.normpath(os.path.join(args.output_dir,
                                                  FEATURES_LOG))
    with open(features_file, 'w', 0) as features_handler:      
        features_writer = csv.writer(features_handler, lineterminator="\n")
        features_writer.writerow([
            "step", "state", "score", "metric_value", "best_score"])
        features_handler.flush()
        if staleness is None:
            staleness = DEFAULT_STALENESS
        if penalty is None:
            penalty = DEFAULT_PENALTY
        # retrieving the first dataset in the file
        try:
            with open(datasets_file) as datasets_handler:
                dataset_id = datasets_handler.readline().strip()
        except IOError, exc:
            sys.exit("Could not read the generated datasets file: %s" %
                     str(exc))
        dataset = api.check_resource(dataset_id, api.get_dataset)
        # initial feature set
        fields = Fields(dataset)
        excluded_features = ([] if args.exclude_features is None else
                             args.exclude_features.split(
                                 args.args_separator))
        excluded_ids = [fields.field_id(feature) for
                        feature in excluded_features]
        objective_id = fields.field_id(objective_name)
        field_ids = [field_id for field_id in fields.preferred_fields()
                     if field_id != objective_id and
                     not field_id in excluded_ids]
        initial_state = [False for field_id in field_ids]
        open_list = [(initial_state, - float('inf'), -float('inf'))]
        closed_list = []
        best_state, best_score, best_metric_value = open_list[0]
        best_unchanged_count = 0
        metric = args.maximize
        while best_unchanged_count < staleness and open_list:
            loop_counter += 1
            features_set = find_max_state(open_list)
            state, score, metric_value = features_set
            features_writer.writerow([
                loop_counter, [int(in_set) for in_set in state],
                score, metric_value, best_score])
            features_handler.flush()
            state_fields = [fields.field_name(field_ids[index])
                            for (index, in_set) in enumerate(state) if in_set]
            closed_list.append(features_set)
            open_list.remove(features_set)
            if (score - EPSILON) > best_score:
                best_state, best_score, best_metric_value = features_set
                best_unchanged_count = 0
                if state_fields:
                    message = 'New best state: %s\n' % (state_fields)
                    u.log_message(message, log_file=session_file,
                                  console=args.verbosity)
                    if metric in PERCENT_EVAL_METRICS:
                        message = '%s = %0.2f%% (score = %s)\n' % (
                            metric.capitalize(), metric_value * 100, score)
                    else:
                        message = '%s = %f (score = %s)\n' % (
                            metric.capitalize(),metric_value, score)
                    u.log_message(message, log_file=session_file,
                                  console=args.verbosity)
            else:
                best_unchanged_count += 1

            children = expand_state(state)
            for child in children:
                if (child not in [state for state, _, _ in open_list] and
                        child not in [state for state, _, _ in closed_list]):
                    input_fields = [fields.field_name(field_id)
                                    for (i, field_id)
                                    in enumerate(field_ids) if child[i]]
                    # create models and evaluation with input_fields
                    args.model_fields = args.args_separator.join(input_fields)
                    counter += 1
                    (score,
                     metric_value,
                     metric,
                     resume) = kfold_evaluate(datasets_file,
                                              args, counter, common_options,
                                              penalty=penalty, resume=resume,
                                              metric=metric)
                    open_list.append((child, score, metric_value))

        best_features = [fields.field_name(field_ids[i]) for (i, score)
                         in enumerate(best_state) if score]
        message = (u'The best feature subset is: %s \n'
                   % u", ".join(best_features))
        u.log_message(message, log_file=session_file, console=1)
        if metric in PERCENT_EVAL_METRICS:
            message = (u'%s = %0.2f%%\n' % (metric.capitalize(),
#......... some code omitted here .........
Developer: jinqiushang, Project: bigmler, Lines: 103, Source: k_fold_cv.py

Example 10: open

# Required import: from bigml.fields import Fields [as alias]
# Or: from bigml.fields.Fields import field_id [as alias]
 """
 counter = 0
 if staleness is None:
     staleness = DEFAULT_STALENESS
 if penalty is None:
     penalty = DEFAULT_PENALTY
 # retrieving the first dataset in the file
 try:
     with open(datasets_file) as datasets_handler:
         dataset_id = datasets_handler.readline().strip()
 except IOError, exc:
     sys.exit("Could not read the generated datasets file: %s" % str(exc))
 dataset = api.check_resource(dataset_id, api.get_dataset)
 # initial feature set
 fields = Fields(dataset)
 objective_id = fields.field_id(objective_column)
 field_ids = [field_id for field_id in fields.preferred_fields()
              if field_id != objective_id]
 initial_state = [False for field_id in field_ids]
 open_list = [(initial_state,0)]
 closed_list = []
 best_score = -1
 best_unchanged_count = 0
 metric = args.maximize
 while best_unchanged_count < staleness and open_list:
     (state, score) = find_max_state(open_list)
     state_fields = [fields.field_name(field_ids[i])
                     for (i, val) in enumerate(state) if val]
     closed_list.append((state, score))
     open_list.remove((state, score))
     if (score - EPSILON) > best_score:
Developer: chunhungChou, Project: bigmler, Lines: 33, Source: k_fold_cv.py


Note: The bigml.fields.Fields.field_id method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please follow the corresponding project's License; do not reproduce without permission.