

Python Utils.make_tag Method Code Examples

This article collects typical usage examples of the utils.Utils.make_tag method in Python. If you have been wondering how to use Python's Utils.make_tag method, what it is for, or what calling it looks like in practice, the curated code examples below should help. You can also browse further usage examples for utils.Utils, the class this method belongs to.


Four code examples of the Utils.make_tag method are shown below, sorted by popularity by default.
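
The utils.Utils source itself is not reproduced on this page, so the exact implementation of make_tag is not shown. From the call sites below (it always receives a dataset ID and a table ID, its result is compared against a rule's 'tag' field, and in Example 2 it produces keys for the same mapping that is seeded with config.DATASET_ID + "." + config.TABLE_ID), it appears to join the two parts into a single dotted tag string. The following is only a minimal sketch built on that assumption, not the project's actual code:

# Hypothetical sketch only -- NOT the real utils.Utils implementation.
# Assumption: the tag is the dataset ID and table ID joined with a dot,
# so it can serve as a stable key for Gnip rules and table mappings.
class Utils(object):

    @staticmethod
    def make_tag(dataset_id, table_id):
        """Build a single tag string identifying a BigQuery table."""
        return "%s.%s" % (dataset_id, table_id)

print(Utils.make_tag("twitter", "tweets"))  # -> twitter.tweets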

Example 1: get

# Required import: from utils import Utils [as alias]
# Or: from utils.Utils import make_tag [as alias]
    def get(self):

        table = self.request.get("table", None)

        response = rules.get_rules(**GNIP_RULES_PARAMS)

        FORMAT = "%Y%m%d%H%M"
        end = datetime.now()

        # scope the rules to a single table, if one was requested
        if table:

            (project, dataset, table) = Utils.parse_bqid(table)
            tag = Utils.make_tag(dataset, table)
            response = [r for r in response if r['tag'] == tag]

            table = Utils.get_bq().tables().get(projectId=project, datasetId=dataset, tableId=table).execute()

            # BigQuery reports creationTime in epoch milliseconds
            end = float(table['creationTime'])
            end = Utils.millis_to_date(end)

        start = end - timedelta(days=SEARCH_DAYS)

        # attach a Historical PowerTrack job payload to each rule
        for r in response:
            tag = r['tag']
            r['hpt'] = json.dumps({
                "publisher": "twitter",
                "streamType": "track",
                "dataFormat": "activity-streams",
                "fromDate": start.strftime(FORMAT),
                "toDate": end.strftime(FORMAT),
                "rules": [{"tag": tag, "value": r['value']}],
                "title": tag})

        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(json.dumps(response))
Developer ID: HectorPerez, Project: twitter-for-bigquery, Lines of code: 37, Source file: app.py
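
The only non-obvious piece of date handling above is that BigQuery's creationTime is an epoch timestamp in milliseconds, so the (unshown) Utils.millis_to_date helper presumably converts it to a datetime before the timedelta arithmetic. Below is a standalone sketch of that window calculation, with SEARCH_DAYS and the conversion helper stubbed in as assumptions:

# Standalone sketch of the date-window logic above; millis_to_date and
# SEARCH_DAYS are assumptions stubbed in here, not the project's own code.
from datetime import datetime, timedelta

SEARCH_DAYS = 30  # assumed value; the real constant lives elsewhere in app.py

def millis_to_date(millis):
    """Convert epoch milliseconds (as reported by BigQuery) to a datetime."""
    return datetime.fromtimestamp(millis / 1000.0)

end = millis_to_date(1438387200000.0)   # example creationTime value
start = end - timedelta(days=SEARCH_DAYS)
print(start.strftime("%Y%m%d%H%M"), end.strftime("%Y%m%d%H%M"))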

Example 2: start

# Required import: from utils import Utils [as alias]
# Or: from utils.Utils import make_tag [as alias]
    def start(schema, logger):

        # initialize table mapping for default table
        # BUGBUG: initialize based on query to prod
        table_mapping = {
            config.DATASET_ID + "." + config.TABLE_ID: [config.DATASET_ID, config.TABLE_ID]
        }

        datasets = Utils.get_bq().datasets().list(projectId=config.PROJECT_ID).execute()
        datasets = datasets.get("datasets", None)

        for d in datasets:
            ref = d.get("datasetReference", None)
            bq_tables = Utils.get_bq().tables().list(projectId=ref.get("projectId"), datasetId=ref.get("datasetId")).execute()
            if bq_tables['totalItems'] > 0:
                for t in bq_tables.get("tables", None):
                    ref = t.get("tableReference", None)
                    dataset_id = ref.get("datasetId", None)
                    table_id = ref.get("tableId", None)
                    key = Utils.make_tag(dataset_id, table_id)
                    table_mapping[key] = [dataset_id, table_id]

        print("Initialized tables: %s" % table_mapping)

        listener = GnipListener(schema, table_mapping, logger=logger)

        while True:

            stream = None

            try:
                # clean gnip headers
                _headers = GnipListener.HEADERS
                headers = {}
                for k, v in _headers.items():
                    headers[k] = v.strip()

                #req = urllib2.Request(config.GNIP_STREAM_URL, headers=GnipListener.HEADERS)
                req = urllib2.Request(config.GNIP_STREAM_URL, headers=headers)
                response = urllib2.urlopen(req, timeout=(1+GnipListener.KEEP_ALIVE))

                decompressor = zlib.decompressobj(16+zlib.MAX_WBITS)
                remainder = ''
                while True:
                    tmp = decompressor.decompress(response.read(GnipListener.CHUNK_SIZE))
                    if tmp == '':
                        return
                    [records, remainder] = ''.join([remainder, tmp]).rsplit(NEWLINE,1)
                    listener.on_data(records)

                get_stream(listener)

            except:

                logger.exception("Unexpected error:")

                if stream:
                    stream.disconnect()

                time.sleep(SLEEP_TIME)
Developer ID: HectorPerez, Project: twitter-for-bigquery, Lines of code: 62, Source file: load.py
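
The table_mapping built above keys every BigQuery table by the same tag format that the Gnip rules carry, which lets the listener route each incoming record by its rule tag. GnipListener's routing code is not part of this snippet, so the lookup below is only an illustrative guess at how such a mapping gets used:

# Illustrative only: resolving a rule tag back to (dataset_id, table_id)
# with a mapping shaped like the one initialized in the example above.
table_mapping = {
    "twitter.tweets": ["twitter", "tweets"],
    "gnip.sample": ["gnip", "sample"],
}

def resolve(tag):
    """Return (dataset_id, table_id) for a rule tag, or None if unknown."""
    pair = table_mapping.get(tag)
    return tuple(pair) if pair else None

print(resolve("twitter.tweets"))  # ('twitter', 'tweets')
print(resolve("unknown.table"))   # None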

Example 3: post

# Required import: from utils import Utils [as alias]
# Or: from utils.Utils import make_tag [as alias]
    def post(self):
        
        rule = self.request.get("rule", None)
        table_fqdn = self.request.get("table", None)
        (dataset, table) = Utils.parse_bqid(table_fqdn)  
        tag = Utils.make_tag(dataset, table)

        page_next = self.request.get("page_next", None)
        page_count = self.request.get("page_count", None)
        count_total = int(self.request.get("count_total", 0))
        if not page_count:
            page_count = 0
        else:
            page_count = int(page_count) + 1

        end = datetime.now()
        start = end - timedelta(days=SEARCH_DAYS)
        
        # for logging purposes, show the estimate 
        if not page_next:
        
            # Initial count
            g = Utils.get_gnip()
            timeline = g.query(rule, 0, record_callback=None, use_case="timeline", start=start, end=end, count_bucket="day")
            timeline = json.loads(timeline)
            
            count_estimate = 0 
            for r in timeline["results"]:
                count_estimate = count_estimate + r["count"]
                
            logging.info("Task start: %s => %s (est. %s)" % (rule, tag, count_estimate))
            
        g = Utils.get_gnip()
        tweets = g.query(rule, use_case="tweets", start=start, end=end, page=page_next)

        try: 
             
            timing_start = datetime.now()
            response = Utils.insert_records(dataset, table, tweets)
            timing = datetime.now() - timing_start

            count_total = count_total + len(tweets)
         
            logging.info("Task page %s: %s => %s (%s, %sms)" % (page_count, rule, tag, count_total, timing))
            
        except:
 
            logging.exception("Unexpected error:")

        page_next = g.rule_payload.get("next", None)
        if page_next:
            self.enqueue(rule, table_fqdn, page_next, page_count=page_count, count_total=count_total)

        response = {
            "completed" : True
        }
        self.response.headers['Content-Type'] = 'application/json'   
        self.response.out.write(json.dumps(response))
Developer ID: jmg132, Project: twitter-for-bigquery, Lines of code: 60, Source file: app.py
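
Rather than looping over Gnip result pages in one request, this handler processes a single page and then re-enqueues itself with the next page token, carrying page_count and count_total forward. The loop below is a minimal stand-in for that flow with the Gnip query stubbed out, just to make the bookkeeping easier to follow; the real handler does the same accounting across task-queue invocations:

# Minimal stand-in for the paging flow above; fetch_page is a stub that
# mimics a Gnip search returning (tweets, next_page_token).
def fetch_page(page_token):
    pages = {None: (["t1", "t2"], "page2"), "page2": (["t3"], None)}
    return pages[page_token]

page_next, page_count, count_total = None, 0, 0
while True:
    tweets, page_next = fetch_page(page_next)
    count_total += len(tweets)
    print("page %s: %s tweets (%s total)" % (page_count, len(tweets), count_total))
    if not page_next:
        break
    page_count += 1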

Example 4: get

# Required import: from utils import Utils [as alias]
# Or: from utils.Utils import make_tag [as alias]
    def get(self):
        
        response = []
        
        type = self.request.get("type")

        dataset = self.request.get("dataset")
        if "gnip" in dataset:
            dataset = "gnip"
            schema_file = "./schema/schema_gnip.json"
        else:
            dataset = "twitter"
            schema_file = "./schema/schema_twitter.json"
        
        table = self.request.get("name")
        rule_list = self.request.get("rules")
        imprt = self.request.get("import")

        schema_str = Utils.read_file(schema_file)
        schema = json.loads(schema_str)
        
        Utils.insert_table(dataset, table, schema)
        TABLE_CACHE.clear()
            
        name = Utils.make_tag(dataset, table)
        rule_list = [s.strip() for s in rule_list.splitlines()]
        for r in rule_list:
            
            params = GNIP_RULES_PARAMS
            params['tag'] = name
    
            response.append(rules.add_rule(r, **params))
            TABLE_CACHE.clear()

        self.response.headers['Content-Type'] = 'application/json'   
        self.response.out.write(json.dumps(response)) 
Developer ID: jmg132, Project: twitter-for-bigquery, Lines of code: 38, Source file: app.py
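
This handler splits the multi-line rules parameter into individual Gnip rules and tags each one with make_tag(dataset, table), so all records matching those rules land in the newly created table. The snippet below isolates that tagging step; rules.add_rule and GNIP_RULES_PARAMS are omitted because they belong to the surrounding project, and the make_tag stub repeats the dotted-tag assumption from earlier:

# Isolated sketch of the rule-tagging loop; make_tag is the assumed
# dotted-tag helper and the Gnip API call itself is omitted.
def make_tag(dataset_id, table_id):
    return "%s.%s" % (dataset_id, table_id)

rule_text = "#bigquery\nfrom:twitterdev\n"
tag = make_tag("twitter", "tweets")

payloads = []
for line in rule_text.splitlines():
    value = line.strip()
    if value:
        payloads.append({"value": value, "tag": tag})

print(payloads)
# [{'value': '#bigquery', 'tag': 'twitter.tweets'},
#  {'value': 'from:twitterdev', 'tag': 'twitter.tweets'}]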


Note: The utils.Utils.make_tag method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright in the source code belongs to the original authors, and any distribution or use should follow the corresponding project's License. Please do not republish without permission.