

Python Evaluator.run Method Code Examples

This article collects typical usage examples of the Python method evaluator.Evaluator.run. If you are wondering what Evaluator.run does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the containing class, evaluator.Evaluator.


Five code examples of the Evaluator.run method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
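
The examples below come from different projects, so the Evaluator constructor and the run() signature differ from one to the next (a config file, TensorFlow graph handles plus a saver, a model/dataset pair, or a Python expression). As a minimal orientation sketch only, the shared pattern looks roughly like this; every name here is an illustrative placeholder rather than a fixed API:

# Minimal sketch of the shared pattern (placeholder names, not a fixed API;
# "evaluator.cfg" is a hypothetical config path used only for illustration).
from evaluator import Evaluator

evaluator = Evaluator("evaluator.cfg")  # each project passes its own constructor arguments
evaluator.run()                         # some projects pass inputs, e.g. evaluator.run(input_dict)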

Example 1: __main__

# Module to import: from evaluator import Evaluator [as alias]
# Alternatively: from evaluator.Evaluator import run [as alias]
def __main__(argv):
    #%%
    logger = logging.getLogger(__name__)
    logger.info("VECTOR MODEL INFORMATION RETRIEVAL SYSTEM START")

    # Generate the inverted index from the document collection.
    gli = InvertedIndexGenerator(GLI_CONFIG_FILE)
    gli.run()
    gli.write_output()

    # Build the TF-IDF weighted index.
    index = Indexer(INDEX_CONFIG_FILE, TfidfVectorizer)
    index.run()
    index.write_output()

    # Pre-process the queries.
    pc = QueryProcessor(PC_CONFIG_FILE)
    pc.run()
    pc.write_output()

    # Run the queries against the vector model.
    buscador = SearchEngine(BUSCA_CONFIG_FILE, TfidfVectorizer)
    buscador.run()
    buscador.write_output()
    #%%
    # Evaluate the retrieval results.
    avaliador = Evaluator(AVAL_CONFIG_FILE)
    avaliador.run()
    avaliador.write_output()

    logger.info("VECTOR MODEL INFORMATION RETRIEVAL SYSTEM DONE")
Developer: ygorcanalli, Project: bri-2015-01, Lines of code: 28, Source file: __main__.py

Example 2: main

# Module to import: from evaluator import Evaluator [as alias]
# Alternatively: from evaluator.Evaluator import run [as alias]
def main(argv=None):
    config = SafeConfigParser()
    config.read(cmd_args.config_path)
    if cmd_args.restore_checkpoint:
        print('Skipping training phase, loading model checkpoint from: ', 
            config.get('main', 'checkpoint_path'))

    # Get the data.
    train_data_filename = utils.maybe_download(config, 
        config.get('data', 'train_data_filename'))
    train_labels_filename = utils.maybe_download(config, 
        config.get('data', 'train_labels_filename'))
    test_data_filename = utils.maybe_download(config, 
        config.get('data', 'test_data_filename'))
    test_labels_filename = utils.maybe_download(config, 
        config.get('data', 'test_labels_filename'))

    # Extract it into np arrays.
    train_data = utils.extract_data(config, train_data_filename, 60000)
    train_labels = utils.extract_labels(train_labels_filename, 60000)
    test_data = utils.extract_data(config, test_data_filename, 10000)
    test_labels = utils.extract_labels(test_labels_filename, 10000)

    validation_size = config.getint('main', 'validation_size')
    num_epochs = config.getint('main', 'num_epochs')

    # Generate a validation set.
    validation_data = train_data[:validation_size, ...]
    validation_labels = train_labels[:validation_size]
    train_data = train_data[validation_size:, ...]
    train_labels = train_labels[validation_size:]
    train_size = train_labels.shape[0]

    lenet5 = LeNet5(config)

    x, y_ = lenet5.train_input_placeholders()
    y_conv, logits, keep_prob, param_dict = lenet5.model(x)

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits, y_))

    # L2 regularization for the fully connected parameters.
    regularizers = (tf.nn.l2_loss(param_dict['fc1_W']) 
                  + tf.nn.l2_loss(param_dict['fc1_b']) 
                  + tf.nn.l2_loss(param_dict['fc2_W']) 
                  + tf.nn.l2_loss(param_dict['fc2_b']))
    # Add the regularization term to the loss.
    loss += 5e-4 * regularizers

    # Optimizer: set up a variable that's incremented once 
    # per batch and controls the learning rate decay.
    batch = tf.Variable(0, dtype=tf.float32)

    # Decay once per epoch, using an exponential schedule starting at 0.01.
    learning_rate = tf.train.exponential_decay(
        0.01,
        batch * config.getint('main', 'batch_size'),
        train_size,
        0.95,
        staircase=True)

    optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9) \
        .minimize(loss, global_step=batch)

    input_dict = {
        "x": x,
        "y_": y_,
        "y_conv": y_conv,
        "keep_prob": keep_prob,
        "train_data": train_data,
        "train_labels": train_labels,
        "test_data": test_data,
        "test_labels": test_labels,
        "validation_data": validation_data,
        "validation_labels": validation_labels,
        "num_epochs": num_epochs,
        "train_size": train_size
    }

    saver = tf.train.Saver(tf.all_variables())

    evaluator = Evaluator(cmd_args, config, optimizer, 
        learning_rate, loss, saver)
    evaluator.run(input_dict)

    fastgradientsign_advgen = FastGradientSign_AdvGen(cmd_args, [1, 28, 28, 1], saver, config)
    adv_out_df = fastgradientsign_advgen.run(input_dict)

    pkl_path = config.get('main', 'pickle_filepath')
    utils.ensure_dir(os.path.dirname(pkl_path))
    with open(pkl_path, "wb") as pkl:
        pickle.dump(adv_out_df, pkl)
Developer: BenJamesbabala, Project: deep-pwning, Lines of code: 95, Source file: mnist_driver.py

Example 3: main

# Module to import: from evaluator import Evaluator [as alias]
# Alternatively: from evaluator.Evaluator import run [as alias]
def main(argv=None):
    config = SafeConfigParser()
    config.read(cmd_args.config_path)
    if cmd_args.restore_checkpoint:
        print('Skipping training phase, loading model checkpoint from: ', 
            config.get('main', 'checkpoint_path'))

    x_text, y = load_data_and_labels()

    # Build vocabulary
    # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/preprocessing/text.py
    max_document_length = max([len(x.split(" ")) for x in x_text])
    vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
    x = np.array(list(vocab_processor.fit_transform(x_text)))

    # Randomly shuffle data
    if config.get('main', 'seed') == 'None':
        seed = None
    else:
        seed = config.getint('main', 'seed')
    np.random.seed(seed)
    shuffle_indices = np.random.permutation(np.arange(len(y)))
    x_shuffled = x[shuffle_indices]
    y_shuffled = y[shuffle_indices]

    # Split train/test set
    x_train, x_eval, x_test = x_shuffled[:-2000], x_shuffled[-2000:-1000], x_shuffled[-1000:]
    y_train, y_eval, y_test = y_shuffled[:-2000], y_shuffled[-2000:-1000], y_shuffled[-1000:]
    print("Vocabulary Size: {:d}".format(len(vocab_processor.vocabulary_)))
    print("Train/Dev/Test split: {:d}/{:d}/{:d}".format(len(y_train), len(y_eval), len(y_test)))

    semantic_cnn = SemanticCNN(config, x_train.shape[1],
                               len(vocab_processor.vocabulary_), 128, 128)

    x, y_ = semantic_cnn.train_input_placeholders()
    y_conv, logits, keep_prob, l2_loss, embedded_words, embed_W = semantic_cnn.model(x)

    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits, y_))

    # Add the regularization term to the loss.
    loss += 5e-4 * l2_loss

    learning_rate = tf.Variable(tf.constant(1e-3), dtype=tf.float32)
    global_step = tf.Variable(0, name="global_step", trainable=False)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

    pred_labels = tf.argmax(y_conv, 1, name="pred_labels")
    correct_predictions = tf.equal(pred_labels, tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
    cross_entropy = -tf.reduce_sum(tf.cast(y_, "float") * tf.log(y_conv))
    grad = tf.gradients(cross_entropy, embedded_words)

    input_dict = {
        "x": x,
        "y_": y_,
        "y_conv": y_conv,
        "keep_prob": keep_prob,
        "train_data": x_train,
        "train_labels": y_train,
        "test_data": x_test,
        "test_labels": y_test,
        "validation_data": x_eval,
        "validation_labels": y_eval,
        "num_epochs": config.getint('main', 'num_epochs'),
        "train_size": len(y_train),
        "embedded_words": embedded_words,
        "vocab_processor": vocab_processor,
        "embed_W": embed_W
    }

    saver = tf.train.Saver()
    evaluator = Evaluator(cmd_args, config, optimizer, 
        learning_rate, loss, saver, onehot_labels=True)
    evaluator.run(input_dict)

    wordvec_advgen = WordVec_AdvGen(cmd_args, saver, config)
    wordvec_advgen.run(input_dict)
Developer: BenJamesbabala, Project: deep-pwning, Lines of code: 80, Source file: semantic_driver.py

Example 4: run_cnn

# Module to import: from evaluator import Evaluator [as alias]
# Alternatively: from evaluator.Evaluator import run [as alias]
def run_cnn(model_params, optimization_params, dataset_path, dataset_params, filename_params, visual_params, epochs, verbose=False):
    print(filename_params)
    if not os.path.exists(filename_params.results):
        os.makedirs(filename_params.results)

    is_config, config_values = interface.command.get_command("-config")
    is_curriculum, curriculum_set = interface.command.get_command("-curriculum")
    is_batch_run, batch_index = interface.command.get_command("-batch", default="0")
    is_init_params, param_path = interface.command.get_command("-params")

    if is_config:
        # Assume config is specifically for running bootstrapping batches.
        config_arr = eval(config_values)
        if len(config_arr) == 2:
            loss_function = config_arr[0]
            label_noise = float(config_arr[1])
            dataset_params.label_noise = label_noise
            model_params.loss = loss_function
            batch_index = loss_function + "-" + str(label_noise) + "-" + batch_index
            print(batch_index)

    if is_curriculum:
        dataset_path = curriculum_set

    weights = None
    if is_init_params:
        store = ParamStorage()
        if not param_path:
            param_path = "./results/params.pkl"
        weights = store.load_params(path=param_path)['params']


    dataset = DataLoader.create()
    dataset.load(dataset_path, dataset_params, optimization_params.batch_size) #Input stage
    model = ConvModel(model_params, verbose=True) #Create network stage

    evaluator = Evaluator(model, dataset, optimization_params, dataset_path)
    evaluator.run(epochs=epochs, verbose=verbose, init=weights)
    report = evaluator.get_result()
    network_store_path = filename_params.network_save_name
    result_path = filename_params.results + "/results.json"
    if is_batch_run:
        network_store_path = filename_params.results + "/batch" + batch_index + ".pkl"
        result_path = filename_params.results + "/batch" + batch_index + ".json"

    storage = ParamStorage(path=network_store_path)
    storage.store_params(model.params)

    dataset.destroy()

    if visual_params.gui_enabled:
        interface.server.stop_job(report)

    printing.print_section('Evaluation precision and recall')

    prc = PrecisionRecallCurve(pr_path, model.params, model_params, dataset_params)
    test_datapoints = prc.get_curves_datapoints(optimization_params.batch_size, set_name="test")
    valid_datapoints = prc.get_curves_datapoints(optimization_params.batch_size, set_name="valid")
    #Stores the model params. Model can later be restored.
    printing.print_section('Storing model parameters')

    if visual_params.gui_enabled:
        interface.server.send_precision_recall_data(test_datapoints, valid_datapoints)
    storage.store_result(result_path, evaluator.events, test_datapoints, valid_datapoints)
Developer: olavvatne, Project: CNN, Lines of code: 66, Source file: cnn.py

Example 5: wait_for_completion

# Module to import: from evaluator import Evaluator [as alias]
# Alternatively: from evaluator.Evaluator import run [as alias]
    s.notify = s.simple_notify
    s.start()

    if sys.argv[2:]:
        # Timestamps have been specified. Non-interactive version.
        s.enqueue(*(int(t) for t in sys.argv[2:]))

        loop = GObject.MainLoop()
        def wait_for_completion():
            if s.timestamp_queue.empty():
                # Quit application
                s.snapshot_ready.wait()
                loop.quit()
            return True
        GLib.idle_add(wait_for_completion)
        loop.run()
    else:
        if Evaluator is None:
            logger.warn("Missing evaluator module.\nFetch it from the repository")
            sys.exit(0)

        # Adding the following lines breaks the code, with a warning:
        #    sys:1: Warning: cannot register existing type `GstSelectorPad'
        #    sys:1: Warning: g_object_new: assertion `G_TYPE_IS_OBJECT (object_type)' failed
        #pipe=Gst.parse_launch('playbin uri=file:///media/video/Bataille.avi')
        #pipe.set_state(Gst.State.PLAYING)

        ev = Evaluator(globals_=globals(), locals_=locals())
        ev.set_expression('s.enqueue(12000, 24000, 36000)')
        ev.run()
Developer: oaubert, Project: advene, Lines of code: 32, Source file: snapshotter.py


Note: The evaluator.Evaluator.run method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective authors, who retain copyright over the source code; please consult each project's license before distributing or using it. Do not reproduce without permission.