

Python publisher.info Function Code Examples

This article collects typical usage examples of the pubsublogger.publisher.info function in Python. If you are wondering what publisher.info does, how to call it, or simply want to see it used in context, the curated examples below should help.


Fifteen code examples of the info function are shown below, sorted by popularity by default.
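All fifteen examples share one basic pattern: assign a channel name to publisher.channel, then emit messages through the level-specific functions (pubsublogger dispatches log messages over a Redis pub/sub channel). A minimal sketch of that shared pattern, with a placeholder channel name and messages taken from the examples below:

from pubsublogger import publisher

# Tag every subsequent message with this channel name.
publisher.channel = "Global"

# Emit messages at the severity levels used throughout the examples.
publisher.info("Starting to publish.")                 # informational event
publisher.debug("Nothing to publish")                  # low-level diagnostic
publisher.warning("Shutdown Flag Up: Terminating.")    # noteworthy condition
publisher.error("Unable to fetch resource")            # failure worth alerting on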

Example 1: main

def main():
    """Main Function"""

    # CONFIG #
    cfg = ConfigParser.ConfigParser()
    cfg.read('./packages/config.cfg')

    # REDIS #
    r_serv = redis.StrictRedis(
        host = cfg.get("Redis_Queues", "host"),
        port = cfg.getint("Redis_Queues", "port"),
        db = cfg.getint("Redis_Queues", "db"))

    # LOGGING #
    publisher.channel = "Global"

    # ZMQ #
    # NOTE: 'configfile' is defined at module level in the original script.
    PubGlob = ZMQ_PubSub.ZMQPub(configfile, "PubSub_Global", "global")

    # FUNCTIONS #
    publisher.info("Starting to publish.")

    while True:
        filename = r_serv.lpop("filelist")

        if filename is not None:

            msg = cfg.get("PubSub_Global", "channel")+" "+filename
            PubGlob.send_message(msg)
            publisher.debug("{0} Published".format(msg))
        else:
            time.sleep(10)
            publisher.debug("Nothing to publish")
Developer: caar2000, Project: AIL-framework, Lines: 33, Source: ZMQ_Pub_Global.py

Example 2: fetcher

def fetcher():
    """
        Main function which fetch the datasets
    """
    while config_db.sismember('modules', module):
        try:
            urllib.urlretrieve(url, temp_filename)
        except:
            publisher.error('Unable to fetch ' + url)
            __check_exit()
            continue
        drop_file = False
        """
            Check is the file already exists, if the same file is found,
            the downloaded file is dropped. Else, it is moved in his
            final directory.
        """
        to_check = glob.glob( os.path.join(old_directory, '*') )
        to_check += glob.glob( os.path.join(directory, '*') )
        for file in to_check:
            if filecmp.cmp(temp_filename, file):
                drop_file = True
                break
        if drop_file:
            os.unlink(temp_filename)
            publisher.debug('No new file on ' + url)
        else:
            os.rename(temp_filename, filename)
            publisher.info('New file on ' + url)
        __check_exit()
    config_db.delete(module + "|" + "fetching")
Developer: CIRCL, Project: bgp-ranking, Lines: 31, Source: fetch_raw_files.py

Example 3: db_import

def db_import(filename, day):
    with open(filename, 'r') as f:
        entry = ''
        pipeline = routing_db.pipeline()
        i = 0
        for line in f:
            # End of block, extracting the information
            if line == '\n':
                i += 1
                parsed = re.findall('(?:ASPATH|PREFIX): ([^\n{]*)', entry)
                try:
                    block = parsed[0].strip()
                    # RIPE-NCC-RIS BGP IPv6 Anchor Prefix @RRC00
                    # RIPE-NCC-RIS BGP Anchor Prefix @ rrc00 - RIPE NCC
                    if block in ['2001:7fb:ff00::/48', '84.205.80.0/24',
                            '2001:7fb:fe00::/48', '84.205.64.0/24']:
                        asn = 12654
                    else:
                        asn = int(parsed[1].split()[-1].strip())
                    pipeline.hset(block, day, asn)
                except:
                    #FIXME: check the cause of the exception
                    publisher.warning(entry)
                entry = ''
                if i%10000 == 0:
                    pipeline.execute()
                    pipeline = routing_db.pipeline()
            else:
                # append the line to the current block.
                entry += line
        pipeline.execute()
        publisher.info('{f} finished, {nb} entries imported.'.format(f=filename, nb=i))
Developer: klunejko, Project: IP-ASN-history, Lines: 33, Source: file_import.py
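For reference, a small self-contained illustration of what the regular expression in db_import extracts from one block of the dump. The two-line sample entry below is made up, not taken from a real RIS dump:

import re

# Hypothetical block, shaped like the ones db_import() accumulates.
entry = ("PREFIX: 192.0.2.0/24\n"
         "ASPATH: 3333 1234\n")

parsed = re.findall('(?:ASPATH|PREFIX): ([^\n{]*)', entry)
# parsed == ['192.0.2.0/24', '3333 1234']

block = parsed[0].strip()                  # '192.0.2.0/24'
asn = int(parsed[1].split()[-1].strip())   # 1234, the origin AS (last in the path)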

Example 4: manager

def manager():
    """
        Manage (start/stop) the process (fetching/parsing) of the modules
    """
    modules = config_db.smembers('modules')
    modules_nr = len(modules)
    # Cleanup
    for module in modules:
        config_db.delete(module + '|parsing')
        config_db.delete(module + '|fetching')

    while True:
        for module in modules:
            parsing = config_db.get(module + "|" + "parsing")
            fetching = config_db.get(module + "|" + "fetching")
            if parsing is None:
                launch_parser(module)
            if fetching is None:
                launch_fetcher(module)

            parsing = config_db.get(module + "|" + "parsing")
            fetching = config_db.get(module + "|" + "fetching")
            # NOTE: StrictRedis returns strings, so depending on how these
            # flags are written this comparison may need to be against '0'.
            if parsing == 0 and fetching == 0:
                config_db.srem('modules', module)

        modules = config_db.smembers('modules')
        if len(modules) != modules_nr:
            modules_nr = len(modules)
            publisher.info('These modules are running: ' + str(modules))
        else:
            time.sleep(sleep_timer)
Developer: CIRCL, Project: bgp-ranking, Lines: 31, Source: module_manager.py

Example 5: launch

def launch():
    """
        Fetch all the whois entry assigned to the server of this :class:`Connector`
    """
    i = 0
    while True:
        try:
            entry = temp_db.spop(key_ris)
            if not entry:
                __disconnect()
                i = 0
                publisher.debug("Disconnected of " + server)
                time.sleep(sleep_timer)
                continue
            if cache_db.get(entry) is None:
                if not connected:
                    __connect()
                publisher.debug(server + ", query : " + str(entry))
                whois = fetch_whois(entry)
                if whois != '':
                    cache_db.setex(entry, server + '\n' + unicode(whois, errors="replace"), cache_ttl)
                if not keepalive:
                    __disconnect()
            i += 1
            if i%10000 == 0:
                publisher.info(str(temp_db.scard(key_ris)) + ' to process on ' + server)
        except IOError as text:
            publisher.error("IOError on " + server + ': ' + str(text))
            time.sleep(sleep_timer)
            __disconnect()
Developer: pramos, Project: bgp-ranking, Lines: 30, Source: fetch_ris_entries.py

Example 6: analyse

def analyse(url, path):
    faup.decode(url)
    url_parsed = faup.get()

    resource_path = url_parsed['resource_path']
    query_string = url_parsed['query_string']

    result_path = 0
    result_query = 0

    if resource_path is not None:
        result_path = is_sql_injection(resource_path)

    if query_string is not None:
        result_query = is_sql_injection(query_string)

    if (result_path > 0) or (result_query > 0):
        paste = Paste.Paste(path)
        if (result_path > 1) or (result_query > 1):
            print "Detected SQL in URL: "
            print urllib2.unquote(url)
            to_print = 'SQLInjection;{};{};{};{}'.format(paste.p_source, paste.p_date, paste.p_name, "Detected SQL in URL")
            publisher.warning(to_print)
            #Send to duplicate
            p.populate_set_out(path, 'Duplicate')
            #send to Browse_warning_paste
            p.populate_set_out('sqlinjection;{}'.format(path), 'BrowseWarningPaste')
        else:
            print "Potential SQL injection:"
            print urllib2.unquote(url)
            to_print = 'SQLInjection;{};{};{};{}'.format(paste.p_source, paste.p_date, paste.p_name, "Potential SQL injection")
            publisher.info(to_print)
Developer: Rafiot, Project: AIL-framework, Lines: 32, Source: SQLInjectionDetection.py
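The is_sql_injection() helper is not part of this excerpt. Judging only by how it is called (a score where 1 means suspicious and anything above 1 means detected), a minimal hypothetical scorer in the same spirit could look like the following; the keyword list is purely illustrative and not the one AIL-framework actually uses:

import urllib2

def is_sql_injection(url_part):
    """Hypothetical stand-in: 0 = clean, 1 = suspicious, 2+ = likely injection."""
    keywords = ["select", "union", "insert", "drop table", "--", "/*", "or 1=1"]
    decoded = urllib2.unquote(url_part).lower()
    return sum(1 for kw in keywords if kw in decoded)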

Example 7: main

def main():
    """Main Function"""

    # CONFIG #
    cfg = ConfigParser.ConfigParser()
    cfg.read(configfile)

    # REDIS #
    r_serv = redis.StrictRedis(
        host = cfg.get("Redis_Queues", "host"),
        port = cfg.getint("Redis_Queues", "port"),
        db = cfg.getint("Redis_Queues", "db"))

    # LOGGING #
    publisher.channel = "Queuing"

    # ZMQ #
    Sub = ZMQ_PubSub.ZMQSub(configfile, "PubSub_Categ", "onion_categ", "tor")

    # FUNCTIONS #
    publisher.info("""Suscribed to channel {0}""".format("onion_categ"))

    while True:
        Sub.get_and_lpush(r_serv)

        if r_serv.sismember("SHUTDOWN_FLAGS", "Onion_Q"):
            r_serv.srem("SHUTDOWN_FLAGS", "Onion_Q")
            print "Shutdown Flag Up: Terminating"
            publisher.warning("Shutdown Flag Up: Terminating.")
            break
Developer: caar2000, Project: AIL-framework, Lines: 30, Source: ZMQ_Sub_Onion_Q.py

Example 8: create_tld_list

def create_tld_list(url = "https://mxr.mozilla.org/mozilla-central/source/netwerk/dns/effective_tld_names.dat?raw=1"):
    """Recover a tld list from url.

    :param url: -- The url of the tld list.
    :return: -- list

    This function recover from mozilla.org the list of the effective tld names,
    Save it as a file, and return a list of all the tld.


    """
    domains = []
    htmlSource = urllib.urlopen(url).read()
    with open("ICCANdomain", 'wb') as F:
        F.write(htmlSource)

    with open("ICCANdomain", 'rb') as F:

        for num, line in enumerate(F):
            if re.match(r"^\/\/|\n", line) is None:
                domains.append(re.sub(r'\*', '', line[:-1]))
            else:
                publisher.info("Comment line ignored.")

    return domains
Developer: caar2000, Project: AIL-framework, Lines: 25, Source: lib_refine.py
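A possible use of the returned list (the has_known_tld helper below is hypothetical, not part of the project): check whether a hostname ends in a known effective TLD, trying the longest candidate suffix first so that 'co.uk' wins over 'uk'.

def has_known_tld(hostname, tlds):
    """Hypothetical helper: True if hostname ends in a known effective TLD."""
    parts = hostname.lower().split('.')
    for i in range(1, len(parts)):           # longest suffix first
        if '.'.join(parts[i:]) in tlds:
            return True
    return False

tlds = create_tld_list()
print has_known_tld('example.co.uk', tlds)   # True if 'co.uk' is in the list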

Example 9: main

def main():
    """Main Function"""

    # CONFIG #
    cfg = ConfigParser.ConfigParser()
    cfg.read(configfile)

    # REDIS #
    r_serv = redis.StrictRedis(
        host = cfg.get("Redis_Queues", "host"),
        port = cfg.getint("Redis_Queues", "port"),
        db = cfg.getint("Redis_Queues", "db"))

    # LOGGING #
    publisher.channel = "Queuing"

    # ZMQ #
    channel = cfg.get("PubSub_Words", "channel_0")
    subscriber_name = "curve"
    subscriber_config_section = "PubSub_Words"

    Sub = ZMQ_PubSub.ZMQSub(configfile, subscriber_config_section, channel, subscriber_name)
    # FUNCTIONS #
    publisher.info("""Suscribed to channel {0}""".format(channel))

    while True:
        Sub.get_and_lpush(r_serv)

        if r_serv.sismember("SHUTDOWN_FLAGS", "Curve_Q"):
            r_serv.srem("SHUTDOWN_FLAGS", "Curve_Q")
            print "Shutdown Flag Up: Terminating"
            publisher.warning("Shutdown Flag Up: Terminating.")
            break
Developer: caar2000, Project: AIL-framework, Lines: 33, Source: ZMQ_Sub_Curve_Q.py

Example 10: refining_regex_dataset

def refining_regex_dataset(r_serv, r_key, regex, min_match, year, month, luhn = True, dnscheck = True):
    """Refine the "raw dataset" of paste with regulars expressions

    :param r_serv: -- Redis connexion database
    :param r_key: -- (str) The name of the key read in redis (often the name of
        the keywords category list)
    :param min_match: -- (int) Below this number file are deleted
    :param regex: -- Regular expression which will be match.

    This function Refine database created with classify_token_paste function.
    It opening again the files which matchs the keywords category list, found
    regular expression inside it and count how many time is found.

    If there is not too much match about the regular expression the file is
    deleted from the list.

    Than it finally merge the result by day to be able to create a bar graph
    which will represent how many occurence by day the regex match.

    """
    for filename in r_serv.zrange(r_key, 0, -1):

        with gzip.open(filename, 'rb') as F:
            var = 0
            matchs = set([])

            for num, kword in enumerate(F):

                match = re.findall(regex, kword)
                var += len(match)

                for y in match:
                    if y != '' and len(y) < 100:
                        matchs.add(y)
            # If there are fewer matches than min_match, delete it (false positive)
            if len(matchs) <= min_match:
                r_serv.zrem(r_key, filename)
                publisher.debug("{0} deleted".format(filename))
            else:
                # otherwise, update the score.
                if r_key == "creditcard_categ" and luhn:
                    for card_number in matchs:
                        if is_luhn_valid(card_number):

                            r_serv.zincrby(r_key+'_occur', filename, 1)

                            publisher.info("{1} is valid in the file {0}".format(filename, card_number))
                        else:
                            publisher.debug("{0} card is invalid".format(card_number))

                if r_key == "mails_categ" and dnscheck:
                    r_serv.zadd(r_key+'_occur', checking_MX_record(r_serv, matchs), filename)

                else:
                    # LUHN NOT TRIGGERED (Other Categs)
                    r_serv.zadd(r_key+'_occur',
                        len(matchs),
                        filename)

    create_graph_by_day_datastruct(r_serv, r_key, year, month)
Developer: caar2000, Project: AIL-framework, Lines: 60, Source: lib_refine.py
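The is_luhn_valid() helper referenced above is not shown in this excerpt. The name points at the standard Luhn checksum, so a plausible sketch (not necessarily the project's actual implementation) is:

def is_luhn_valid(card_number):
    """Standard Luhn checksum: True if the digit string passes the check."""
    digits = [int(d) for d in str(card_number) if d.isdigit()]
    odd = digits[-1::-2]                                      # from the right: 1st, 3rd, ...
    even = [sum(divmod(d * 2, 10)) for d in digits[-2::-2]]   # doubled and digit-summed
    return (sum(odd) + sum(even)) % 10 == 0

print is_luhn_valid('4111111111111111')   # True, a classic test number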

Example 11: main

def main():
    """Main Function"""

    # CONFIG #
    cfg = ConfigParser.ConfigParser()
    cfg.read(configfile)

    # REDIS #
    r_serv = redis.StrictRedis(
        host = cfg.get("Redis_default", "host"),
        port = cfg.getint("Redis_default", "port"),
        db = args.db)

    p_serv = r_serv.pipeline(False)

    # LOGGING #
    publisher.channel = "Script"

    # ZMQ #
    channel = cfg.get("PubSub_Longlines", "channel_0")
    Sub = ZMQ_PubSub.ZMQSub(configfile, "PubSub_Longlines", channel)

    # FUNCTIONS #
    publisher.info("Longlines ubscribed to channel {0}".format(cfg.get("PubSub_Longlines", "channel_0")))

    while True:
        PST = P.Paste(Sub.get_message().split(" ", -1)[-1])
        r_serv.sadd("Longlines", PST.p_mime)
        PST.save_in_redis(r_serv, PST.p_mime)
Developer: caar2000, Project: AIL-framework, Lines: 29, Source: ZMQ_Sub_Longlines.py

Example 12: prepare_bview_file

def prepare_bview_file():
    publisher.info('Start converting binary bview file to plain text...')

    # create the plain text dump from the binary dump
    output = open(os.path.join(bview_dir, 'bview'), 'w')  # 'w' ('wr' is not a valid mode)
    nul_f = open(os.devnull, 'w')
    bgpdump = os.path.join(root_dir, path_to_bgpdump_bin)
    p_bgp = Popen([bgpdump, filename], stdout=PIPE, stderr=nul_f)
    for line in p_bgp.stdout:
        output.write(line)
    nul_f.close()
    output.close()
    publisher.info('Conversion finished, start splitting...')

    # Split the plain text file
    fs = FilesSplitter(output.name, number_of_splits)
    splitted_files = fs.fplit()
    publisher.info('Splitting finished.')

    # Flush the old routing database and launch the population of
    # the new database
    routing_db.flushdb()

    publisher.info('Start pushing all routes...')
    pushing_process_service = os.path.join(services_dir, "pushing_process")
    run_splitted_processing(split_procs, pushing_process_service,
            splitted_files)
    publisher.info('All routes pushed.')

    # Remove the binary and the plain text files
    os.unlink(output.name)
    os.unlink(filename)
Developer: bloodc0r3, Project: bgp-ranking, Lines: 32, Source: push_update_routing.py

Example 13: main

def main():
    """Main Function"""

    # CONFIG #
    cfg = ConfigParser.ConfigParser()
    cfg.read(configfile)

    # REDIS #
    r_serv = redis.StrictRedis(
        host = cfg.get("Redis_Queues", "host"),
        port = cfg.getint("Redis_Queues", "port"),
        db = cfg.getint("Redis_Queues", "db"))

    # LOGGING #
    publisher.channel = "Script"

    # ZMQ #
    channel = cfg.get("PubSub_Longlines", "channel_1")
    subscriber_name = "tokenize"
    subscriber_config_section = "PubSub_Longlines"

    #Publisher
    publisher_config_section = "PubSub_Words"
    publisher_name = "pubtokenize"

    Sub = ZMQ_PubSub.ZMQSub(configfile, subscriber_config_section, channel, subscriber_name)
    Pub = ZMQ_PubSub.ZMQPub(configfile, publisher_config_section, publisher_name)

    channel_0 = cfg.get("PubSub_Words", "channel_0")

    # FUNCTIONS #
    publisher.info("Tokeniser subscribed to channel {0}".format(cfg.get("PubSub_Longlines", "channel_1")))

    while True:
        message = Sub.get_msg_from_queue(r_serv)
        print message
        if message is not None:
            PST = P.Paste(message.split(" ", -1)[-1])
        else:
            if r_serv.sismember("SHUTDOWN_FLAGS", "Tokenize"):
                r_serv.srem("SHUTDOWN_FLAGS", "Tokenize")
                print "Shutdown Flag Up: Terminating"
                publisher.warning("Shutdown Flag Up: Terminating.")
                break
            publisher.debug("Tokeniser is idling 10s")
            time.sleep(10)
            print "sleepin"
            continue

        for word, score in PST._get_top_words().items():
            if len(word) >= 4:
                msg = channel_0+' '+PST.p_path+' '+str(word)+' '+str(score)
                Pub.send_message(msg)
                print msg
Developer: caar2000, Project: AIL-framework, Lines: 56, Source: ZMQ_PubSub_Tokenize.py

Example 14: redis_interbargraph_set

def redis_interbargraph_set(r_serv, year, month, overwrite):
    """Create a Redis sorted set.

    :param r_serv: -- connexion to redis database
    :param year: -- (integer) The year to process
    :param month: -- (integer) The month to process
    :param overwrite: -- (bool) trigger the overwrite mode

    This function create inside redis the intersection of all days in
    a month two by two.
    Example:
    For a month of 31days it will create 30 sorted set between day and
    day+1 until the last day.
    The overwrite mode delete the intersets and re-create them.

    """
    a = date(year, month, 01)
    b = date(year, month, cal.monthrange(year, month)[1])

    if overwrite:
        r_serv.delete("InterSet")

        for dt in rrule(DAILY, dtstart = a, until = b - timedelta(1)):
            dayafter = dt+timedelta(1)

            r_serv.delete(str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")))

            r_serv.zinterstore(
                str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")),
                {str(dt.strftime("%Y%m%d")):1,
                str(dayafter.strftime("%Y%m%d")):-1})

            r_serv.zadd(
                "InterSet",
                1,
                str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")))
    else:
        for dt in rrule(DAILY, dtstart = a, until = b - timedelta(1)):
            dayafter = dt+timedelta(1)

            if r_serv.zcard(str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d"))) == 0:

                r_serv.zinterstore(
                    str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")),
                    {str(dt.strftime("%Y%m%d")):1,
                    str(dayafter.strftime("%Y%m%d")):-1})

                r_serv.zadd(
                    "InterSet",
                    1,
                    str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d")))

                publisher.info(str(dt.strftime("%Y%m%d"))+str(dayafter.strftime("%Y%m%d"))+" Intersection Created")

            else:
                publisher.warning("Data already exist, operation aborted.")
Developer: caar2000, Project: AIL-framework, Lines: 56, Source: lib_words.py
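The weights {day: 1, day+1: -1} passed to zinterstore make the destination's score, for every member present in both sets, the difference between the two days' scores (the default aggregate is SUM, so the stored score is 1*a + (-1)*b). A tiny self-contained illustration with made-up key names, using the same score-first redis-py 2.x zadd argument order as the example above:

import redis

r = redis.StrictRedis(host='localhost', port=6379, db=0)

r.zadd('20140101', 5, 'alpha')   # score 5 on day one
r.zadd('20140102', 2, 'alpha')   # score 2 on day two

# Stores score('20140101') - score('20140102') for each common member.
r.zinterstore('2014010120140102', {'20140101': 1, '20140102': -1})
print r.zscore('2014010120140102', 'alpha')   # -> 3.0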

Example 15: service_start_multiple

def service_start_multiple(servicename, number, param = None):
    """
        Start multiple services using `service_start` and save their pids
    """
    i = 0
    publisher.info('Starting ' + str(number) + ' times ' + servicename)
    while i < number:
        proc = service_start(servicename, param)
        writepid(servicename, proc)
        i += 1
Developer: CIRCL, Project: bgp-ranking, Lines: 10, Source: initscript.py


Note: The pubsublogger.publisher.info examples in this article are compiled from open-source code and documentation platforms such as GitHub and MSDocs; the snippets are selected from open-source projects contributed by their respective authors. The source code remains the copyright of its original authors; consult the corresponding project's License before redistributing or using it. Do not reproduce without permission.