

Python bs4.UnicodeDammit Class Code Examples

This article collects typical usage examples of the bs4.UnicodeDammit class in Python. If you have been wondering what the UnicodeDammit class does and how to use it, the curated code examples below may help.


The following shows 15 code examples of the UnicodeDammit class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
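
Before diving into the examples, here is a minimal, self-contained sketch of the core pattern they all build on: UnicodeDammit takes a byte string of unknown encoding, optionally guided by a list of candidate encodings, and produces unicode. The sample bytes are invented for illustration:

from bs4 import UnicodeDammit

raw = b"Sacr\xe9 bleu!"  # windows-1252 bytes; not valid UTF-8
dammit = UnicodeDammit(raw, ["utf-8", "windows-1252"])
print(dammit.unicode_markup)     # u'Sacré bleu!'
print(dammit.original_encoding)  # 'windows-1252'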

Example 1: learn

    def learn(self, name, phrase, channel):
        name = self.aliases.resolve(name)
        if name not in self.users:
            self.users[name] = True

        if "password" in phrase:
            return
        phrase = phrase.split(" ")
        phrase = filter(lambda x: x and "http" not in x and "ftp:" not in x and x[0] != ".", phrase)
        now = datetime.datetime.utcnow()
        documents = []

        for i in range(len(phrase) + 1):
            seed = UnicodeDammit.detwingle(phrase[i-1] if i > 0 else "")
            answer = UnicodeDammit.detwingle(phrase[i] if i < len(phrase) else "")

            documents.append({
                "name": name,
                "seed": seed,
                "answer": answer,
                "added": now,
                "random": random.random()
            })

        yield self.db.insert(documents, safe=True)
Developer ID: jdpls, Project: Servrhe, Lines: 25, Source: markov.py
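
Example 1 runs every token through UnicodeDammit.detwingle before storing it. As a quick illustration of what detwingle does, here is a sketch adapted from the pattern in the Beautiful Soup documentation (the sample document is invented): it takes bytes that mix UTF-8 with Windows-1252 and returns bytes that are consistently UTF-8, which a plain .decode('utf8') would otherwise reject:

from bs4 import UnicodeDammit

snowmen = (u"\N{SNOWMAN}" * 3).encode("utf8")  # UTF-8 bytes
quote = (u"\N{LEFT DOUBLE QUOTATION MARK}I like snowmen!"
         u"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")  # cp1252 bytes
doc = snowmen + quote  # a mixed, "twingled" byte string

fixed = UnicodeDammit.detwingle(doc)  # re-encoded as pure UTF-8
print(fixed.decode("utf8"))  # ☃☃☃“I like snowmen!”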

Example 2: create

    def create(self, soupfragment):
        result = dict()
        field = self._getfield_info(soupfragment)
        title = ""
        result["link"] = ""
        result["answers"] = ""
        result["views"] = ""
        result["location"] = ""
        if self.urlobject is not None:
            result["location"] = self.urlobject.description()

        #result['location'] = self.webclient.get_url_desc()
        if field is not None:
            title = UnicodeDammit(field.a.contents[0]).unicode_markup
            result["link"] = field.a['href']
            fragment = self._get_answer_and_viewa_fragment(soupfragment)
            if fragment is not None:
                result["answers"] = self._get_number_from(fragment.contents[0].strip())
                result["views"] = self._get_number_from(fragment.contents[2].strip())
            else:
                print "No answer and view bloq identified in thread: ", result["link"]
                result["answers"] = -1
                result["views"] = -1

        result["title"] = title.strip()

        #result['next_url'] = _nextUrl(soupfragment)
        return result
Developer ID: javierj, Project: LaPISK, Lines: 28, Source: HTML2Objects.py

Example 3: _sub_read

    def _sub_read(self, f):
        example_num = 0
        curr_id = 'EXAMPLE_0'
        for line in f:
            # Process encoding
            if not isinstance(line, text_type):
                line = UnicodeDammit(line, ['utf-8',
                                            'windows-1252']).unicode_markup
            line = line.strip()
            # Handle instance lines
            if line.startswith('#'):
                curr_id = line[1:].strip()
            elif line and line not in ['TRAIN', 'TEST', 'DEV']:
                split_line = line.split()
                num_cols = len(split_line)
                del line
                # Line is just a class label
                if num_cols == 1:
                    class_name = safe_float(split_line[0],
                                            replace_dict=self.class_map)
                    field_pairs = []
                # Line has a class label and feature-value pairs
                elif num_cols % 2 == 1:
                    class_name = safe_float(split_line[0],
                                            replace_dict=self.class_map)
                    field_pairs = split_line[1:]
                # Line just has feature-value pairs
                elif num_cols % 2 == 0:
                    class_name = None
                    field_pairs = split_line

                curr_info_dict = {}
                if len(field_pairs) > 0:
                    # Get the current instance's feature-value pairs
                    field_names = islice(field_pairs, 0, None, 2)
                    # Convert values to floats; otherwise the features
                    # would be treated as categorical
                    field_values = (safe_float(val) for val in
                                    islice(field_pairs, 1, None, 2))

                    # Add the feature-value pairs to dictionary
                    curr_info_dict.update(zip(field_names, field_values))

                    if len(curr_info_dict) != len(field_pairs) / 2:
                        raise ValueError(('There are duplicate feature ' +
                                          'names in {} for example ' +
                                          '{}.').format(self.path_or_list,
                                                        curr_id))

                yield curr_id, class_name, curr_info_dict

                # Set default example ID for next instance, in case we see a
                # line without an ID.
                example_num += 1
                curr_id = 'EXAMPLE_{}'.format(example_num)
Developer ID: hefeix, Project: skll, Lines: 55, Source: readers.py
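
Example 3 passes ['utf-8', 'windows-1252'] as candidate encodings, a pattern that recurs in Example 5 below. A small sketch of how that list behaves (byte strings invented): the candidates are tried in order, so valid UTF-8 decodes as UTF-8 and anything that fails falls through to Windows-1252:

from bs4 import UnicodeDammit

for raw in (b"caf\xc3\xa9", b"caf\xe9"):
    result = UnicodeDammit(raw, ["utf-8", "windows-1252"])
    print(result.original_encoding, result.unicode_markup)
# utf-8 café
# windows-1252 café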

Example 4: corpus_generator

 def corpus_generator(self):
     with open(self.corpus_path, 'rb') as f:
         i = 0
         for line in f:
             line = UnicodeDammit(line.strip()).unicode_markup
             if line:
                 if self.lower:
                     line = line.lower()
                 i += 1
                 if i % 100000 == 0:
                     logging.info('Read {} nonblank lines'.format(i))
                 for tok in re.split(r'\s+', line):
                     yield tok
Developer ID: DevSinghSachan, Project: tan-clustering, Lines: 13, Source: class_lm_cluster.py

Example 5: convert_to_libsvm

def convert_to_libsvm(lines):
    '''
    Converts a sequence of lines (e.g., a file or list of strings) in MegaM
    format to LibSVM format.

    :param lines: The sequence of lines to convert.
    :type lines: L{file} or L{list} of L{str}

    :return: A tuple of the newly formatted data, the mappings from class names
             to numbers, and the mappings from feature names to numbers.
    :rtype: 3-L{tuple} of (L{list} of L{unicode}, L{dict}, and L{dict})
    '''

    # Initialize variables
    field_num_dict = UniqueNumberDict()
    class_num_dict = UniqueNumberDict()

    result_list = []
    # Iterate through MegaM file
    for line in lines:
        line_fields = set()
        # Process encoding
        line = UnicodeDammit(line, ['utf-8', 'windows-1252']).unicode_markup.strip()

        # Ignore comments (and TEST/DEV lines)
        if not line.startswith('#') and not line == 'TEST' and not line == 'DEV':
            result_string = ''
            split_line = line.split()
            result_string += '{0}'.format(class_num_dict[split_line[0]])
            # Handle features if there are any
            if len(split_line) > 1:
                del split_line[0]
                # Loop through all feature-value pairs printing out pairs
                # separated by commas (and with feature names replaced with
                # numbers)
                for field_num, value in sorted(zip((field_num_dict[field_name] for field_name in islice(split_line, 0, None, 2)),
                                                   (float(value) if value != 'N/A' else 0.0 for value in islice(split_line, 1, None, 2)))):
                    # Check for duplicates
                    if field_num in line_fields:
                        field_name = (field_name for field_name, f_num in field_num_dict.items() if f_num == field_num).next()
                        raise AssertionError("Field {} occurs on same line twice.".format(field_name))
                    # Otherwise output non-empty features
                    elif value != 'N/A' and float(value):
                        result_string += ' {}:{}'.format(field_num, value)
                        line_fields.add(field_num)
            result_list.append(result_string)

    return result_list, class_num_dict, field_num_dict
Developer ID: manugarri, Project: skll, Lines: 48, Source: megam_to_libsvm.py

Example 6: ramble

    def ramble(self, name=None, seed=""):
        if name:
            name = self.aliases.resolve(name)
            if name not in self.users:
                returnValue("")

        message = []

        if seed:
            seed = UnicodeDammit.detwingle(seed)
            chunk = seed
            while chunk and len(" ".join(message)) < 300:
                message.append(chunk)
                chunk = yield self.prev(name, chunk)
            message.reverse()

        chunk = yield self.next(name, seed)
        while chunk and len(" ".join(message)) < 300:
            message.append(chunk)
            chunk = yield self.next(name, chunk)
            if not chunk and len(" ".join(message)) < 30:
                chunk = yield self.next(name, chunk)

        response = (" ".join(message)).decode("utf8")
        if seed and response == seed.decode("utf8"):
            response = yield self.ramble(name)
        returnValue(response)
Developer ID: jdpls, Project: Servrhe, Lines: 27, Source: markov.py

Example 7: _fetch_data

    def _fetch_data(self, entry_name, url):
        # url = url.decode('utf-8')
        # if url[:5] == 'http:':
        #     url = 'https' + url[4:]
        # url = url.encode('utf-8')
        original_entry_name = entry_name
        data = dict()
        try:
            with contextlib.closing(urllib2.urlopen(url.encode('utf-8'))) as page_source:
                page_content = page_source.read()
            doc = UnicodeDammit(page_content, is_html=True)
            parser = lxml.html.HTMLParser(encoding=doc.original_encoding)
            doc = lxml.html.document_fromstring(page_content, parser=parser)

            bar_name = doc.xpath('//a[contains(@class, "star_title_h3")]')
            if not bar_name:
                bar_name = doc.xpath('//a[contains(@class, "card_title_fname")]')
            if type(bar_name) is list and len(bar_name) > 0:
                entry_name = bar_name[0].text_content().strip()
            num_visits = doc.xpath('//span[contains(@class, "j_visit_num")]')
            if not num_visits:
                num_visits = doc.xpath('//span[contains(@class, "card_menNum")]')
            num_posts = doc.xpath('//span[contains(@class, "j_post_num")]')
            if not num_posts:
                num_posts = doc.xpath('//span[contains(@class, "card_infoNum")]')
            if type(num_visits) is list and len(num_visits) > 0:
                num_visits = num_visits[0].text_content()
                num_visits = cogtu_misc.get_first_number_from_text(num_visits)
            else:
                num_visits = 0
            if type(num_posts) is list and len(num_posts) > 0:
                num_posts = num_posts[0].text_content()
                num_posts = cogtu_misc.get_first_number_from_text(num_posts)
            else:
                num_posts = 0
            num_groups = doc.xpath("//a[contains(@class, 'star_nav_ico_group')]/span")
            if type(num_groups) is list and len(num_groups) > 0:
                num_groups = num_groups[0].text_content()
                num_groups = cogtu_misc.get_first_number_from_text(num_groups)
            else:
                num_groups = 0
        except urllib2.HTTPError:
            logging.info('urllib2.HTTPError. Skip.')
            return None, None
        except urllib2.URLError:
            logging.info('urllib2.URLError. Skip.')
            return None, None

        data['num_visits'] = int(num_visits)
        data['num_posts'] = int(num_posts)
        data['num_groups'] = int(num_groups)
        data['entry_name'] = entry_name
        data['original_entry_name'] = original_entry_name
        data['url'] = url
        return entry_name, data
Developer ID: habemusne, Project: flexibloader, Lines: 55, Source: worker_tieba_data.py
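
Example 7 uses UnicodeDammit purely for encoding detection and leaves the parsing to lxml. A condensed sketch of that hand-off, under the assumption that page_content holds the raw response bytes (the parse_page name is ours):

import lxml.html
from bs4 import UnicodeDammit

def parse_page(page_content):
    # Sniff the encoding from the raw bytes; is_html=True also lets
    # UnicodeDammit consult any <meta charset> declaration in the markup.
    sniffed = UnicodeDammit(page_content, is_html=True)
    parser = lxml.html.HTMLParser(encoding=sniffed.original_encoding)
    return lxml.html.document_fromstring(page_content, parser=parser)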

Example 8: __init__

 def __init__(self,url):# logs info,warning,error,critical,debug events.
     '''
     Description: This is the class constructor; it takes a URL as input and parses it based on RFC 1738.
     Status: In Progress.
     Usage: This will be used by the connection manager and the active/passive scanner to extract URL variables.
     '''
     self.url = UnicodeDammit.detwingle(url, 'UTF-8')        
     self.defaultHttpsPort = 443
     self.defaultHttpPort = 80
     urlLogger.logInfo("--- Package: UrlManager - Module: UrlHandler Class: urlHandler Initiated ---")
Developer ID: rekcahemal, Project: CapCake, Lines: 10, Source: URLAnalyzer.py

Example 9: remove_evernote_link

def remove_evernote_link(link, html):
    html = UnicodeDammit(html, ["utf-8"], is_html=True).unicode_markup
    link_converted = UnicodeDammit(link.WholeRegexMatch, ["utf-8"], is_html=True).unicode_markup
    sep = u'<span style="color: rgb(105, 170, 53);"> | </span>'
    sep_regex = escape_regex(sep)
    no_start_tag_regex = r"[^<]*"
    regex_replace = r"<{0}[^>]*>[^<]*{1}[^<]*</{0}>"
    # html = re.sub(regex_replace.format('li', link.WholeRegexMatch), "", html)
    # Remove link
    html = html.replace(link.WholeRegexMatch, "")
    # Remove empty li
    html = re.sub(regex_replace.format("li", no_start_tag_regex), "", html)
    # Remove dangling separator
    regex_span = regex_replace.format("span", no_start_tag_regex) + no_start_tag_regex + sep_regex
    html = re.sub(regex_span, "", html)
    # Remove double separator
    html = re.sub(sep_regex + no_start_tag_regex + sep_regex, sep_regex, html)
    return html
Developer ID: holycrepe, Project: anknotes, Lines: 19, Source: shared.py

Example 10: selectdir

   def selectdir(geturl):
      r = scraper.get(geturl, stream=True, verify=False, proxies=proxystring, allow_redirects=True)
      rt = UnicodeDammit.detwingle(r.text)
      html = BeautifulSoup(rt.decode('utf-8'), "html.parser")
      if debug == 1:
         orenc = str(html.original_encoding)
         print('\n\033[40m\033[35;1mORIGINAL ENCODING: %s \033[0m\n' % orenc)
      findlinks = html.findAll('a')
      dirlist = []
      for link in findlinks:
         b = link.get('href')
         if not re.match(r'^((\.\.)?\/)$', str(b)):
            if re.search(r'^(.*)(\/)$', str(b)):
               dirlist.append(b)

      p = urlparse(geturl)
      part = p.path.split('/')[-1]
      path = p.path.rstrip(part)
      if '/' not in path[:1]:
         path = '/' + path
      urlfqdn = p.scheme + '://' + p.netloc
      parent = urlfqdn + path

      i = 0
      dirtotal = len(dirlist)
      if dirtotal > 0:
         print('\nFOUND %d DIRECTORIES: \n' % dirtotal)
         while i < dirtotal:
            sel = i + 1
            print(str(sel) + ' - ' + str(dirlist[i]))
            i += 1
         print('')
         lim = dirtotal + 1
         matchtop = r'^(%s)(\/)?$' % urlfqdn
         if not re.match(matchtop,geturl):
            print('0 - BACK TO PARENT DIRECTORY \n')
            startsel = '0-%d' % dirtotal
         else:
            startsel = '1-%d' % dirtotal
         selectdir = raw_input('make a selection [%s] --> ' % startsel)
         if not int(selectdir) in range(0, lim):
            selectdir = raw_input('invalid entry. please enter a selection %s --> ' % startsel)
         if selectdir == '0':
            geturl = parent
            subcont = 0
         else:
            n = int(selectdir) - 1
            usedir = dirlist[n]
            geturl = parent + usedir
            subcont = 1
      else:
         print('\nNO DIRECTORIES FOUND. using current directory.. \n')
         subcont = 0
         geturl = parent + part
      return geturl, subcont, parent
Developer ID: johnjohnsp1, Project: cloudget, Lines: 55, Source: cloudget.py

Example 11: clean_google_title

	def clean_google_title(self, title):
		has_dot = False
		
		titleCleaned = UnicodeDammit(title).unicode_markup
		# clean step 1
		# BUGFIX: don't remove [xxx], e.g. "OQL[C++]: Ext..."
		titleCleaned = re.sub("(<(.*?)>)", "", titleCleaned)
		re_hasdot = re.compile("(\.\.\.|&hellip;)", re.I)
		match = re_hasdot.search(title)
		if match is not None:
			has_dot = True
			# clean step 2, here title is readable
		titleCleaned = re.sub("(&nbsp;|&#x25ba;|&hellip;)", "", titleCleaned)
		titleCleaned = re.sub("(&#.+?;|&.+?;)", "", titleCleaned)
		titleCleaned = titleCleaned.strip()
		readableTitle = titleCleaned
		# Shrink, only letters left
		titleCleaned = re.sub("\W", "", titleCleaned)
		titleCleaned = titleCleaned.lower()
		return (readableTitle, titleCleaned, has_dot)
Developer ID: AlexLyj, Project: aminer-spider, Lines: 20, Source: extractor.py

Example 12: document_generator

def document_generator(path, lower=False):
    '''
    Default document reader.  Takes a path to a file with one document per line,
    with tokens separated by whitespace, and yields lists of tokens per document.
    This could be replaced by any function that yields lists of tokens.
    See main() for how it is called.

    Note: this uses BeautifulSoup's UnicodeDammit to convert to unicode.
    '''
    with open(path, 'rb') as f:
        i = 0
        for line in f:
            line = UnicodeDammit(line.strip()).unicode_markup
            if line:
                if lower:
                    line = line.lower()
                i += 1
                if i % 100000 == 0:
                    logging.info('Read {} nonblank lines'.format(i))
                yield re.split(r'\s+', line)
Developer ID: nickmarton, Project: NLP, Lines: 20, Source: pmi_cluster.py

Example 13: formatForReddit

    def formatForReddit(self, feedEntry, postType, subreddit, raw):
        if 'content' in feedEntry:
          content = feedEntry['content'][0]['value']
        elif 'description' in feedEntry:
          content = feedEntry.description
        else:
          content = ''
        logging.debug(content)
        parser = EveRssHtmlParser()
        
        title = feedEntry['title']

        # some feeds like Twitter are raw so the parser hates it.
        if (raw):
          regex_of_url = '(https?:\/\/[\dA-Za-z\.-]+\.[A-Za-z\.]{2,6}[\/\w&=#\.\-\?]*)'
          title = re.sub(regex_of_url, '', title)
          clean_content = content.replace(' pic.twitter.com', ' http://pic.twitter.com')
          clean_content = re.sub(regex_of_url, '<a href="\\1">link</a>', clean_content)
          clean_content = UnicodeDammit.detwingle(clean_content)
          #logging.info(clean_content)
          u = UnicodeDammit(clean_content,
                            smart_quotes_to='html',
                            is_html=False)
          # fix twitter putting ellipses on the end
          content = u.unicode_markup.replace(unichr(8230),' ...')
          logging.debug('.....')
        
        if "tumblr.com" in content:
          # Replace with larger images (hopefully such images exist)
          content = content.replace('_500.', '_1280.')
        
        # Added the .replace because the parser does something funny to them and 
        # removes them before I can handle them
        content = content.replace('&nbsp;', ' ')
        content = content.replace('&bull;', '*').replace('&middot;','*')
        content = content.replace('&ldquo;','\'').replace('&rdquo;','\'')
        content = re.sub('( [ ]+)', ' ', content)
        parser.feed(content)
        parser.comments[0] = '%s\n\n%s' %(feedEntry['link'], parser.comments[0])
        parser.comments[-1] += self.config['signature']
        
        if 'author' in feedEntry:
          author = '~' + feedEntry['author'].replace('@', ' at ')
        else:
          author = ''

        return {'comments': parser.comments,
                'link':     feedEntry['link'],
                'subreddit': subreddit,
                'title':    '[%s] %s %s' %(postType, title, author)}
Developer ID: eveRedditBot, Project: eve_reddit_bot, Lines: 50, Source: main.py
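
Example 13 passes smart_quotes_to='html', and Example 14 below uses smart_quotes_to='ascii'. A short sketch of what that argument changes, with an invented input: by default Microsoft smart quotes come through as unicode characters, while 'html' and 'ascii' rewrite them as entities or plain ASCII quotes:

from bs4 import UnicodeDammit

raw = u"\N{LEFT DOUBLE QUOTATION MARK}hi\N{RIGHT DOUBLE QUOTATION MARK}".encode("windows_1252")
print(UnicodeDammit(raw, ["windows-1252"]).unicode_markup)                          # “hi”
print(UnicodeDammit(raw, ["windows-1252"], smart_quotes_to="html").unicode_markup)  # &ldquo;hi&rdquo;
print(UnicodeDammit(raw, ["windows-1252"], smart_quotes_to="ascii").unicode_markup) # "hi"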

Example 14: getContent

def getContent(soup, source=''):
    newContent = []
    # Cleaning phase
    genericCleaning(soup)
    sourceSpecificcleaning(soup, source)

    # f = open("content.html", 'w'); f.write(soup.prettify().encode('utf-8')); f.close();
    # Finding content in the tree
    bestElem = None; bestText = '';
    for el in soup.findAll(True):
        score = 0.0;  hasTitle = False
        if el.name in ['p', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7'] and el.parent.name == '[document]':
            score += 3
        for c in el:
            if c.name == 'br': # business insider style
                score += 0.5
            if c.name == 'p':
                score += 1.0
            if not hasTitle and c.name in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7']:
                score += 1.0
                hasTitle = True
        if score >= 3.0: # at least 3 paragraphs
            textOutput = getText(el)
            if float(len(textOutput))/score > 20.0: # we need at least 20 characters per container
                newContent.append(textOutput)
        elif score >= 1.0:
            if bestElem is None:
                bestElem = el; bestText = getText(el, False)
            else:
                a = getText(el, False)
                if bestElem is None or len(a) > len(bestText):
                    bestElem = el; bestText = a
    if len(newContent) == 0 and bestElem is not None: # in case nothing had a score of 3, but something had a score of 1 or more
        newContent.append(bestText)

    finalText = UnicodeDammit(u'\n'.join(newContent), smart_quotes_to='ascii').unicode_markup
    return finalText.replace('\n\n', '\n')
Developer ID: gt-big-data, Project: QDoc, Lines: 37, Source: articleParser.py

Example 15: normalize

def normalize(s):
    if isinstance(s, unicode):
        return s

    try:
        u = s.decode("utf8")
    except:
        try:
            u = (s[:-1]).decode("utf8")
        except:
            try:
                u = UnicodeDammit.detwingle(s).decode("utf8")
            except:
                u = UnicodeDammit(s, ["utf8", "windows-1252"]).unicode_markup

    return u
Developer ID: skiddiks, Project: Servrhe, Lines: 16, Source: irc.py
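
Since normalize is a defensive decode ladder, a short usage sketch may help show which branch each kind of input takes (inputs invented):

print(normalize(u"d\xe9j\xe0 vu"))  # already unicode: returned unchanged
print(normalize(b"caf\xc3\xa9"))    # valid UTF-8: the first decode succeeds
print(normalize(b"caf\xc3"))        # truncated UTF-8: dropping the last byte recovers u'caf'
print(normalize(b"\xe9caf"))        # stray windows-1252 byte: detwingle re-encodes it to UTF-8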


Note: the bs4.UnicodeDammit class examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and distribution and use are subject to each project's license. Do not repost without permission.