

Python Analyzer.analyze Method Code Examples

This article collects typical usage examples of the Python method analyzer.Analyzer.analyze. If you have been wondering how Analyzer.analyze is used in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, analyzer.Analyzer.


Fifteen code examples of the Analyzer.analyze method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
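Across the examples below the calling pattern is essentially the same: construct an Analyzer with whatever inputs the project defines, then call its analyze method and use the returned value or the analyzer's updated state. The minimal sketch below illustrates only that generic pattern; the constructor argument and return value are hypothetical placeholders, since each project's Analyzer is different.

# A minimal, hypothetical sketch of the common usage pattern (not taken from
# any specific project below); constructor arguments and return values vary.
from analyzer import Analyzer

def run_analysis(data):
    analyzer = Analyzer(data)    # project-specific constructor arguments
    result = analyzer.analyze()  # may return stats or just update internal state
    return result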

Example 1: analyze

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
 def analyze(self):
     for i, line in enumerate(self.segment):
         if i == 0:
             self.vicar.name = line
         else:
             analyzer = Analyzer(line, self.vicar)
             analyzer.analyze()
Author: RPOD, Project: ffe.pfarrerbuch, Lines: 9, Source: segmentAnalyzer.py

Example 2: parse

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
def parse(path, f=None):
    p = Parser(path=path)
    p.parse_file()
    a = Analyzer(parser=p)
    a.analyze()
    j = Packer(analyzer=a)
    if f is None:
        return j.pack()
    else:
        j.pack(f=f)
Author: hotelzululima, Project: macholibre, Lines: 12, Source: macholibre.py

Example 3: __trainModel

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
 def __trainModel(self, verbose=False, trainRatio=.5):
     if verbose:
         analyzer = Analyzer(self.__model, self.__dataset, trainRatio)
         analyzer.start()
     if self.__dataset.trainSampleCount > 0:
         self.__model.train(self.__dataset.trainSamples, self.__dataset.trainResponses)
     if verbose:
         analyzer.stop()
         analyzer.analyze()
         print analyzer
Author: xsyann, Project: ocr, Lines: 12, Source: ocr.py

Example 4: processFile

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
def processFile(path):
    try:
        p = Parser(path=path)
        p.parseFile()
        a = Analyzer(parser=p)
        a.analyze()
        j = Packer(analyzer=a)
        return j.pack()
    except:
        print path
        exit(1)
Author: fxfactorial, Project: macholibre, Lines: 13, Source: create_model.py

Example 5: classification_preprocess_all_datasets

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
def classification_preprocess_all_datasets():
    """
    Preprocesses all datasets to be ready for classification task.
    This will include stemming, word correction, lower-casing, hashtag removal, special char removal.
    """
    
    for i in range(0,len(utils.annotated_datasets)):
        tweetlines = utils.get_dataset(utils.annotated_datasets[i])
        tweets = []
        for line in tweetlines:
            if len(line)>1:
                tweets.append(tweet.to_tweet(line))
        
#        tweets = lower_case(tweets)
        tweets = remove_hastags_and_users(tweets)
        tweets = count_emoticons(tweets)
        tweets = replace_links(tweets)
        tweets = remove_specialchars(tweets)
        tweets = correct_words(tweets)
        tweets = stem(tweets)
        tweets = tokenize(tweets)
        tweets = pos_tag(tweets)
        tweets = count_exclamations(tweets)

        analyzer = Analyzer(utils.annotated_datasets[i], tweets)
        stats = analyzer.analyze()
        print stats
        #store tweets in pickles...
        print "Storing pickles..."
        utils.store_pickles(tweets, utils.annotated_datasets[i][24:len(utils.annotated_datasets[i])-4])
Author: JohnArneOye, Project: twitter-sentiment, Lines: 32, Source: preprocessing.py

Example 6: __trainModel

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
 def __trainModel(self, trainRatio=.5, errorsIteration=0, log=None):
     if log:
         analyzer = Analyzer(self.__model, self.__dataset, trainRatio)
         analyzer.start()
     self.__model.train(self.__dataset.trainSamples,
                        self.__dataset.trainResponses)
     samples, responses = self.__dataset.testSamples, self.__dataset.testResponses
     i = 0
     while responses.any() and i < errorsIteration:
         samples, responses = self.__injectErrors(samples, responses)
         i += 1
     self.__dataset.testSamples = samples
     self.__dataset.testResponses = responses
     if log:
         analyzer.stop()
         analyzer.analyze()
         log(str(analyzer))
Author: xsyann, Project: mediocre, Lines: 19, Source: ocr.py

Example 7: brute

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
def brute(ciphertext):
    analyzer = Analyzer()
    highestText  = ""
    highestValue = 0
    keys = [chr(i) for i in xrange(0, 0x100)]
    for key in keys:
        message = xor(ciphertext, key)
        value = analyzer.analyze(message)
        if (value > highestValue):
            highestValue = value
            highestText  = message
    print "[Score = %f] %s" % (highestValue, highestText)
Author: FrankSpierings, Project: Cryptopals, Lines: 14, Source: main.py

Example 8: search

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))
    
    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)

    # Check whether get_user_timeline returned None
    if tweets is None:
        sys.exit("Error: No tweets were returned!")
    
    # Absolute paths to lists 
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")
    
    # Initialize an Analyzer object
    analyzer = Analyzer(positives, negatives)
    
    # Initialize sentiment analysis counts for chart values
    positive, negative, neutral = 0.0, 0.0, 0.0
    
    # Iterate through tweets 
    for tweet in tweets:
        
        # Return score analysis for tweet
        score = analyzer.analyze(tweet)
        
        # Increment respective sentiment analysis counts
        if score > 0.0:
            positive += 1
        elif score < 0.0:
            negative += 1
        else:
            neutral += 1
        
    # Set sentiment analysis counts to percentages
    num_tweets = positive + negative + neutral

    positive = positive / num_tweets
    negative = negative / num_tweets
    neutral = neutral / num_tweets

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Author: robgoyal, Project: OnlineCourses, Lines: 52, Source: application.py

Example 9: re_analyze

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
def re_analyze():
    """
    Unpickles preprocessed tweets, re-analyzes them, and stores the resulting stats.
    """
    labels = ["random",'"rosenborg"','"erna solberg"']
    data = {}
    worddata = {}
    for i in xrange(3):
        tweets = utils.get_pickles(i)
        analyzer = Analyzer(utils.annotated_datasets[i], tweets)
        
        avg_list,words_list= analyzer.analyze()
        print avg_list
        worddata[labels[i]] = words_list
        data[labels[i]] = avg_list
    plotting.average_wordclasses(worddata, "averages")

    plotting.detailed_average_wordclasses(data, "averages2")
Author: JohnArneOye, Project: twitter-sentiment, Lines: 20, Source: preprocessing.py

Example 10: search

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 100)

    # handle get_user_timeline errors
    if tweets is None:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # counts for sentiment categories
    pos_count, neg_count, neut_count = 0, 0, 0

    # score and assign sentiment category to each tweet
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            pos_count += 1
        elif score < 0.0:
            neg_count += 1
        else:
            neut_count += 1

    whole = pos_count + neg_count + neut_count
    positive, negative, neutral = (pos_count / whole), (neg_count / whole), (neut_count / whole)

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Author: machajew, Project: CS50, Lines: 44, Source: application.py

Example 11: search

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
def search():

    # validate screen_name
    screen_name = request.args.get("screen_name", "").lstrip("@")
    if not screen_name:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)
    
    # get screen_name's most recent 100 tweets
    tweets = helpers.get_user_timeline(screen_name, 100)
    
    # return to index if screen_name doesn't exist
    if tweets is None:
        return redirect(url_for("index"))
        
    # create positive, negative and neutral count
    positive, negative, neutral = 0, 0, 0
    
    # analyze each tweet & increase the corresponding sentiment count
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            positive += 1
        elif score < 0.0:
            negative += 1
        else:
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
Author: Meladsafi, Project: cs50, Lines: 41, Source: application.py

Example 12: do_analyze

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
    def do_analyze(self, args_str):
        parser = self._get_arg_parser()
        parser.add_argument("-o", "--output",
                            metavar="FILE", dest="output",
                            help="specific output dir or file"),
        parser.add_argument("-t", "--threads",
                            type=int, dest="threads", default=multiprocessing.cpu_count(),
                            help="threads number to work [default equal cpu count]")
        parser.add_argument("--plot-all",
                            action="store_true", dest="plot_all", default=False,
                            help="plot all stocks, not only good ones")
        parser.add_argument('codes', nargs='*')
        options = self._parse_arg(parser, args_str)
        if not options:
            return

        schemes = []
        user_options = []
        for k, v in self.config['analyzing']['schemes'].items():
            schemes.append(v)
            user_options.append(v['desc'])
        select = util.select(user_options, 'please select a scheme used for analyzing')
        config = schemes[select]['config']
        logging.info('analyzer config:\n%s' % yaml.dump(config))

        if not self.loaded:
            self.do_load()

        stocks = {}
        if len(options.codes):
            for code in options.codes:
                if code in self.dm.stocks:
                    stocks[code] = self.dm.stocks[code]
                else:
                    logging.error('unknown stock %s', code)
        else:
            stocks = self.dm.stocks

        if not len(stocks):
            logging.error('no stocks found in local database, please run \'load\' command first')
            return

        analyzer = Analyzer(stocks, self.dm.indexes, config)
        logging.info('all %d available stocks will be analyzed' % len(analyzer.stocks))
        logging.info('-----------invoking data analyzer module-------------')
        analyzer.analyze(threads=options.threads)
        logging.info('-------------------analyze done----------------------')

        list = []
        for result in analyzer.good_stocks:
            stock = result.stock
            list.append({'code': stock.code, 'name': stock.name, 'price': stock.price,
                         'pe': stock.pe, 'nmc': stock.nmc / 10000, 'mktcap': stock.mktcap / 10000,
                         'toavgd5': '%.2f%%' % stock.get_turnover_avg(5),
                         'toavgd30': '%.2f%%' % stock.get_turnover_avg(30),
                         'area': stock.area, 'industry': stock.industry
                         })
        df = DataFrame(list)
        if df.empty:
            logging.info('no good stocks found')
            return

        logging.info('list of good %d stocks%s:' % (len(analyzer.good_stocks),
                                                    options.output and ' and save plots to %s' % options.output or ''))
        print(df.to_string(
            columns=('code', 'name', 'price', 'pe', 'nmc', 'mktcap', 'toavgd5', 'toavgd30', 'area', 'industry')))
        logging.info('global market status: %s' % analyzer.global_status)

        if options.output:
            logging.info('generating html report...')
            os.makedirs(options.output, exist_ok=True)
            analyzer.generate_report(options.output, only_plot_good=not options.plot_all)
            logging.info('done')
Author: changbindu, Project: rufeng-finance, Lines: 75, Source: rufeng_finance.py

Example 13: __init__

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
class CameraProcessor:
  def __init__(self, camera, gui):
    self.analyzer = Analyzer(gui)
    self.cap = cv2.VideoCapture(camera)
    self.callibration_state = 0
    self.callibration_message = [
      "Please click the plus sign with the circle around it",
      "Please click the plus sign WITHOUT the circle around it",
      "Got it!"
    ]
    self.create_images_dir()
    self.gui = gui
    gui.subscribe('color_threshold', self)
    gui.subscribe('blob_threshold', self)
    gui.subscribe('area_threshold', self)
    
  def create_images_dir(self):
    try:
      os.mkdir("images")
    except:
      pass
      
  def handle_callibration_click(self, event,x,y,flags,param):
    if event == 1:
      self.analyzer.set_callibration(self.callibration_state, (x,y))
      print("Setting callibration point %d to (%d, %d)" % (self.callibration_state, x, y))
      self.callibration_state += 1
      print(self.callibration_message[self.callibration_state])
      
  def callibrate(self):
    cv2.namedWindow('callibration')
    cv2.setMouseCallback('callibration',self.handle_callibration_click)
    print(self.callibration_message[self.callibration_state])
    while self.callibration_state < 2:
      ret, frame = self.cap.read()
      resized = cv2.resize(frame, (800,600))
      cv2.imshow( "callibration" ,resized)
      cv2.waitKey(1)
    cv2.destroyWindow("callibration")
    
  def detected_dice(self):
    if len(self.analyzer.detected_dice) == 0: return None 
    return self.analyzer.detected_dice[0]
  
  def save_frame(self):
    filename = "images/%s.jpg" % str(uuid.uuid4())
    ret, frame = self.cap.read()
    print("Writing %s" % filename)
    cv2.imwrite(filename, frame)
    
  def process(self):
    ret, frame = self.cap.read()
    self.process_image(frame)
    
  def process_image(self, frame):
    resized = cv2.resize(frame, (800,600))
    self.analyzer.analyze(resized, frame)
  
  
  def set_parameter(self, name, value):
    if name == 'color_threshold':
      self.analyzer.color_threshold = value
    elif name == 'blob_threshold':
      self.analyzer.blob_threshold = value
    elif name == 'area_threshold':
      self.analyzer.set_area_threshold(value * 100)
    
  def report_blobs(self):
    self.analyzer.report()
  
  def teardown(self):
    self.cap.release()
    
  def run_test(self):
    while True:
      self.process()
      key = cv2.waitKey(1)
      if key & 0xFF == ord('q'):
        break
      if key & 0xFF == ord('r'):
        self.report_blobs()
      if key & 0xFF == ord('s'):
        self.save_frame()
    self.teardown()
Author: richmans, Project: dice_checker, Lines: 86, Source: camera.py

Example 14: Observable

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
from setup import Configuration
cfg=Configuration()
from observables import Observable

cfg.parametersSet['region'] = 'SR'
cfg.parametersSet['observable'] = 'met'
cfg.parametersSet['lumi'] = '10000' # pb^-1
#cfg.parametersSet['observable'] = Observable(variable='ZpT',formula='z_pt',labelX='Z p_{T} [GeV]')
#cfg.parametersSet['selection'] = '{"leading jet":"jets_pt[0]>120"}'

label = str(hash(frozenset(cfg.parametersSet.items())))

from analyzer import Analyzer
analyzer = Analyzer(cfg, label)
analyzer.analyze()
analyzer.format_histograms()
analyzer.draw('pippo')

histograms = analyzer.formatted_histograms

# manage output
output_file = TFile('plots.root' if not cfg.parametersSet.has_key('output_name') else cfg.parametersSet['output_name'],
                    'recreate')
for h in histograms: histograms[h].Write()
output_file.Close()

output_file = TFile('logs/log.root', 'update')
output_file.mkdir(label)
output_file.cd(label)
for h in histograms: histograms[h].Write()
Author: SiewYan, Project: DMPD, Lines: 32, Source: process.py

Example 15: analyze

# Required import: from analyzer import Analyzer [as alias]
# Or: from analyzer.Analyzer import analyze [as alias]
    def analyze (self, v1, v2=None):
        """analyze(v1, v2=None) 
           Calculates EOF principal components. 
           Sets the following attributes: 
              'principal_components' 
              'eigenvectors'
              'percent_explained' 
        """
        g = v1.getGrid()
        if g is None:
            raise ValueError, "u does not have spatial dimensions."
        latw, longw = g.getWeights()
        latw = latw / numpy.maximum.reduce(latw)
        if self.latweight_choice() == 'none':
            latweight = 1.0 + 0.0 * latw
        elif self.latweight_choice() == 'area':
            latweight = latw 
        else:
            latweight = numpy.sqrt(latw)
        mean_choice = self.mean_choice()
        nr = self.number_of_components()
    
        lat_axis = v1.getLatitude()
        long_axis = v1.getLongitude()
        time_axis = v1.getTime()
        if time_axis is None:
            raise ValueError, "v1 has no time dimension" 
        nlat = len(lat_axis)
        nlong = len(long_axis)
        ntime = len(time_axis)
        nvar = nlat*nlong
        ax1 = v1(order='...x').getAxisList(omit='time')
        
        if v2 is not None: 
            time_axis_2 = v2.getTime()
            if time_axis_2 is None:
                raise ValueError, 'v2 has no time dimension'
            if not numpy.allclose(time_axis, time_axis_2):
                raise ValueError, 'v1 and v2 have incompatible time axes'
            nvar = 2 * nvar
            ax2 = v2(order='...x').getAxisList(omit='time')
        x = numpy.zeros((ntime, nvar), numpy.float)
    
        for ilat in range(nlat):
            udata = v1.getSlice(latitude=ilat, 
                                required='time',
                                order='t...x', 
                                raw=1)
            if udata.mask is not numpy.ma.nomask:
                raise ValueError, 'eof cannot operate on masked data'
            if numpy.ma.rank(udata) != 2:
                raise ValueError, 'eof cannot handle extra dimension'
            udata = udata.filled()
            x[:, ilat*nlong: (ilat+1)*nlong] = \
                self.__adjust(udata, ntime, mean_choice) * \
                              latweight[numpy.newaxis, ilat]
        
        del udata
        if v2 is not None:
            for ilat in range(nlat):
                udata = v1.getSlice(latitude=ilat, 
                                    required='time',
                                    order='t...x', 
                                    raw=1)
                if udata.mask is not numpy.ma.nomask:
                    raise ValueError, 'eof cannot operate on masked data'
                if numpy.ma.rank(udata) != 2:
                    raise ValueError, 'eof cannot handle extra dimension'
                udata = udata.filled()
                x[:, nlat*nlong + ilat*nlong: nlat*nlong + (ilat+1)*nlong] = \
                    self.__adjust(udata[:, ilat, :], ntime, mean_choice) * \
                              latweight[numpy.newaxis, ilat]
            del udata
      
        a = Analyzer ()
        
        a.analyze (x, nr = nr)
    
    # Compute weighted eigenvectors
        evs = a.evec
        pcs = numpy.matrix(x)*numpy.matrix(evs)
        number_of_components = len(a.eval)
        result = []
        for k in range(number_of_components):
            evs1 = numpy.reshape(evs[0:nlong*nlat, k], (nlat, nlong))
            evs1 = evs1 / latweight[:, numpy.newaxis]
            pc = cdms2.createVariable(evs1, copy=0, axes=ax1, 
                          id=v1.id+'_'+str(k+1),
                          attributes = v1.attributes)
            result.append(pc)
            if v2:
                evs1 = numpy.reshape(evs[nlong*nlat:2*nlong*nlat, k], 
                                       (nlat, nlong))
                evs1 = evs1 / latweight[:, numpy.newaxis]
                pc = cdms2.createVariable(evs1, copy=0, axes=ax2, 
                          id=v2.id+'_'+str(k+1),
                          attributes = v2.attributes)
                result.append(pc)

        self.principal_components = result
#.........rest of the code omitted.........
Author: NCPP, Project: uvcdat-devel, Lines: 103, Source: eof1.py


Note: The analyzer.Analyzer.analyze examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors; refer to each project's license before distributing or using the code. Do not reproduce this article without permission.