当前位置: 首页>>代码示例>>Python>>正文


Python utils.write函数代码示例

本文整理汇总了Python中utils.write函数的典型用法代码示例。如果您正苦于以下问题:Python write函数的具体用法?Python write怎么用?Python write使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。


在下文中一共展示了write函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: trade_reciprocity

def trade_reciprocity(years,resource):
  """Compute, persist and plot the mean import/export reciprocity correlation per year.

  For each year the trade graph for *resource* is built; for every directed
  edge the weight is paired with the weight of the reverse edge, and the
  Pearson correlation of those two weight series is taken as that year's
  reciprocity measure. Results are saved via write() and plotted to a PNG.

  NOTE(review): xs/ys accumulate over ALL countries before a single
  correlation is computed, so `corrcoeffs` holds at most one value per
  year and the final np.mean() is over that single value. If a
  per-country correlation was intended, the `if len(xs)>1` block should
  sit inside the country loop — confirm with the author.

  Python 2 code (print statement below).
  """
  corrmeans = []
  for year in years:
    G = get_graph(year,resource)
    corrcoeffs = []
    [xs,ys] = [[],[]]
    for country in G.nodes():
      for e in G.edges(country):
        try:
          # Pair each edge's weight dict with that of the reverse edge;
          # raises KeyError when the reverse edge does not exist.
          [x1,y1] = [G[e[0]][e[1]],G[e[1]][e[0]]]
          #print [x1,y1]
          xs.append(x1['weight'])
          ys.append(y1['weight'])
        except KeyError:
          # No reverse edge: skip the pair (the bare string is a no-op).
          'whoops'
    if len(xs)>1:
      cc = np.corrcoef([xs,ys])
      corrcoeffs.append(cc[0][1])
    #print corrcoeffs
    corrmeans.append(np.mean(corrcoeffs))
    print [year,np.mean(corrcoeffs)]
  write({'means':corrmeans, 'years':years},get_results_directory(resource),'meanReciprocityCorrelation')
  plt.clf()
  plt.plot(years,corrmeans)
  plt.title('Mean Correlation of Import/Export By Year')
  plt.xlabel('Year')
  plt.ylabel('Mean Correlation of Import/Export')
  directory = get_images_directory(resource)
  plt.savefig(directory+'meanReciprocityCorrelation.png')
  plt.clf()
  return 0
开发者ID:hoqqanen,项目名称:itn,代码行数:31,代码来源:networkExplorer.py

示例2: write_bill_catoxml

def write_bill_catoxml(bill_version_id, options):
  """Fetch the DeepBills JSON for one bill version, pull out the embedded
  CatoXML, and save it as catoxml.xml alongside the bill's other documents.

  Returns a small status dict on success.
  """
  bill_json = fetch_single_bill_json(bill_version_id)
  xml_body = extract_xml_from_json(bill_json)
  destination = document_filename_for(bill_version_id, "catoxml.xml")
  utils.write(xml_body, destination)

  return {'ok': True, 'saved': True}
开发者ID:JT5D,项目名称:congress,代码行数:7,代码来源:deepbills.py

示例3: panel

    def panel(self) :
        """
        Draw the side panel: one shop entry per trap (icon, name, price,
        wrapped description), the level/money displays and the quit/start
        buttons, then refresh the panel widgets.
        """

        self.disp.blit(IMG_SIDEPANEL_BG, (16*SQUARE, 16, 450, 496))
        self.btn_traps = []
        x = 16*SQUARE + 65
        y = 20
        for trap in TRAPS :
            # trap = (icon surface, name, description, price, ...)
            self.disp.blit(trap[0], (x, y))

            name_surf = utils.write(trap[1], BLACK)
            price_surf = utils.write(str(trap[3]), GRAY)
            desc_lines = utils.formattext(trap[2], 35, BLACK, 15)

            self.disp.blit(name_surf, (x + 40, y + 2))
            self.disp.blit(price_surf, (x + 275, y + 2))

            # Stack the wrapped description lines below the name.
            offset = 20
            for line in desc_lines :
                self.disp.blit(line, (x + 40, y + offset))
                offset += 15

            # Clickable area covering this entry (grows with its text).
            self.btn_traps.append( (x, y, 330, offset + 5) )

            y += 75

        self.disp.blit(IMG_LEVEL, RECT_LEVEL)
        self.disp.blit(IMG_MONEY, RECT_MONEY)
        self.disp.blit(IMG_LAB_QUIT, BTN_LAB_QUIT)
        self.disp.blit(IMG_LAB_START, BTN_LAB_START)

        self.updatepanel()
开发者ID:megyland,项目名称:projetisn,代码行数:34,代码来源:Lab.py

示例4: update_bill_version_list

def update_bill_version_list(only_congress):
    """Scan the cached FDsys BILLS sitemaps and write, for every bill, a
    text-versions.json file mapping each version code to its URL and
    last-modified date.

    only_congress -- when truthy, restrict both the sitemap years scanned
                     and the bills recorded to that congress number.

    Raises Exception on a malformed sitemap, an unrecognized bill URL, or
    an unknown bill type.
    """
    # bill_versions[congress][bill_type][bill_number][version_code] -> {url, lastmod}
    bill_versions = {}

    # Which sitemap years should we look at?
    if not only_congress:
        sitemap_files = glob.glob(utils.cache_dir() + "/fdsys/sitemap/*/BILLS.xml")
    else:
        # If --congress=X is specified, only look at the relevant years.
        sitemap_files = [
            utils.cache_dir() + "/fdsys/sitemap/" + str(year) + "/BILLS.xml"
            for year in utils.get_congress_years(only_congress)
        ]
        sitemap_files = [f for f in sitemap_files if os.path.exists(f)]

    # For each year-by-year BILLS sitemap...
    for year_sitemap in sitemap_files:
        dom = etree.parse(year_sitemap).getroot()
        if dom.tag != "{http://www.sitemaps.org/schemas/sitemap/0.9}urlset":
            raise Exception("Mismatched sitemap type.")

        # Loop through each bill text version...
        for file_node in dom.xpath("x:url", namespaces=ns):
            # get URL and last modified date
            url = str(file_node.xpath("string(x:loc)", namespaces=ns))
            lastmod = str(file_node.xpath("string(x:lastmod)", namespaces=ns))

            # extract bill congress, type, number, and version from the URL
            m = re.match(r"http://www.gpo.gov/fdsys/pkg/BILLS-(\d+)([a-z]+)(\d+)(\D.*)/content-detail.html", url)
            if not m:
                raise Exception("Unmatched bill document URL: " + url)
            congress, bill_type, bill_number, version_code = m.groups()
            congress = int(congress)
            if bill_type not in utils.thomas_types:
                raise Exception("Invalid bill type: " + url)

            # If --congress=XXX is specified, only look at those bills.
            if only_congress and congress != only_congress:
                continue

            # Track the documents by congress, bill type, etc.
            bill_versions.setdefault(congress, {}).setdefault(bill_type, {}).setdefault(bill_number, {})[
                version_code
            ] = {"url": url, "lastmod": lastmod}

    # Output the bill version info. We can't do this until the end because we need to get
    # the complete list of versions for a bill before we write the file, and the versions
    # may be split across multiple sitemap files.

    for congress in bill_versions:
        for bill_type in bill_versions[congress]:
            for bill_number in bill_versions[congress][bill_type]:
                utils.write(
                    json.dumps(
                        bill_versions[congress][bill_type][bill_number],
                        sort_keys=True,
                        indent=2,
                        default=utils.format_datetime,
                    ),
                    output_for_bill(congress, bill_type, bill_number, "text-versions.json"),
                )
开发者ID:milimetric,项目名称:congress,代码行数:60,代码来源:fdsys.py

示例5: split_signatures

def split_signatures(pid, signatures=None):
    """Split a petition's signatures into one JSON file per day, plus a
    per-day count summary.

    pid        -- petition id; used to locate the input file and name outputs.
    signatures -- optional pre-loaded signature list; when omitted, loaded
                  from data/api/signatures/<pid>.json.

    Writes api/signatures/<pid>/<YY-MM-DD>.json for each day and a
    stats.json with the total, per-day counts and most recent timestamp.
    Mutates the signature dicts in place (adds 'date'/'time', drops the
    redundant 'type' field).
    """
    if not signatures:
        # Use a context manager so the input file handle is closed
        # deterministically (the original leaked it).
        path = os.getcwd() + "/data/api/signatures/" + pid + ".json"
        with open(path, "r") as f:
            signatures = json.load(f)

    for signature in signatures:
        created = datetime.datetime.fromtimestamp(signature['created'])
        signature['date'] = created.strftime("%y-%m-%d")
        signature['time'] = created.strftime("%H:%M:%S")
        # rm this needless field
        if signature['type'] == "signature":
            signature.pop("type")

    dates = sorted(set(x['date'] for x in signatures))
    mostrecent = max(x['created'] for x in signatures)

    stats = {
        'total': len(signatures),
        'dates': [],
        'last': datetime.datetime.fromtimestamp(mostrecent).strftime("%y-%m-%d"),
        'laststamp': mostrecent
    }

    for day in dates:
        sigs = [x for x in signatures if x['date'] == day]
        stats['dates'].append((day, len(sigs)))
        write(json.dumps(sigs), "api/signatures/" + pid + "/" + day + ".json")

    write(json.dumps(stats, indent=2), "api/signatures/" + pid + "/stats.json")
开发者ID:imclab,项目名称:petitions,代码行数:27,代码来源:whitehouse.py

示例6: write_report

def write_report(report):
  """Serialize *report* to JSON under
  <data_dir>/<inspector>/<year>/<report_id>/report.json and return the
  path relative to the data directory."""
  data_path = "%s/%s/%s/report.json" % (report['inspector'], report['year'], report['report_id'])
  destination = "%s/%s" % (utils.data_dir(), data_path)
  utils.write(utils.json_for(report), destination)
  return data_path
开发者ID:MRumsey,项目名称:inspectors-general,代码行数:7,代码来源:inspector.py

示例7: write_bill_version_metadata

def write_bill_version_metadata(bill_version_id):
  """Read the MODS file for one bill text version and write a small JSON
  metadata record for it: document URLs keyed by format plus the issue date.

  Returns a status dict on success.
  """
  bill_type, number, congress, version_code = utils.split_bill_version_id(bill_version_id)

  bill_version = {
    'bill_version_id': bill_version_id,
    'version_code': version_code,
    'urls': { },
  }

  mods_ns = {"mods": "http://www.loc.gov/mods/v3"}
  doc = etree.parse(document_filename_for(bill_version_id, "mods.xml"))

  # Bucket each <mods:location><mods:url> by the format its label mentions.
  for location in doc.xpath("//mods:location/mods:url", namespaces=mods_ns):
    label = location.attrib['displayLabel']
    for marker, fmt in (("HTML", "html"), ("PDF", "pdf"), ("XML", "xml")):
      if marker in label:
        break
    else:
      fmt = "unknown"
    bill_version["urls"][fmt] = location.text

  bill_version["issued_on"] = doc.xpath("string(//mods:dateIssued)", namespaces=mods_ns)

  utils.write(
    json.dumps(bill_version, sort_keys=True, indent=2, default=utils.format_datetime),
    output_for_bill_version(bill_version_id)
  )

  return {'ok': True, 'saved': True}
开发者ID:GPHemsley,项目名称:congress,代码行数:33,代码来源:bill_versions.py

示例8: run

def run(options):
  """Scrape committee meeting announcements for both chambers and write
  them to data_dir()/committee_meetings_<chamber>.json.

  Existing output files are reloaded first so previously generated
  meeting GUIDs (Senate) can be recycled by the fetchers.
  """
  # Load the committee metadata from the congress-legislators repository and make a
  # mapping from thomas_id and house_id to the committee dict. For each committee,
  # replace the subcommittees list with a dict from thomas_id to the subcommittee.
  utils.require_congress_legislators_repo()
  committees = { }
  for c in utils.yaml_load("congress-legislators/committees-current.yaml"):
    committees[c["thomas_id"]] = c
    # House committees are also keyed by house_committee_id + "00".
    if "house_committee_id" in c: committees[c["house_committee_id"] + "00"] = c
    c["subcommittees"] = dict((s["thomas_id"], s) for s in c.get("subcommittees", []))

  for chamber in ("house", "senate"):
    # Load any existing meetings file so we can recycle GUIDs generated for Senate meetings.
    existing_meetings = []
    output_file = utils.data_dir() + "/committee_meetings_%s.json" % chamber
    if os.path.exists(output_file):
      existing_meetings = json.load(open(output_file))

    # Scrape for meeting info.
    if chamber == "senate":
      meetings = fetch_senate_committee_meetings(existing_meetings, committees, options)
    else:
      meetings = fetch_house_committee_meetings(existing_meetings, committees, options)

    # Write out.
    utils.write(json.dumps(meetings, sort_keys=True, indent=2, default=utils.format_datetime),
      output_file)
开发者ID:GPHemsley,项目名称:congress,代码行数:27,代码来源:committee_meetings.py

示例9: combine

def combine():
    """Bucket every runner's course position into fixed time intervals and
    write two JSON summaries: a roster grouping bib numbers by identical
    placement paths, and per-interval counts of runners at each kilometer.

    Relies on module-level `segments`, `posts` (split names such as "5K",
    "HALF", "Finish Net") and `INTERVAL` — assumed defined elsewhere in
    this file; confirm their units against the comparison with
    `c * INTERVAL` below. Python 2 code (print statement).
    """
    roster = defaultdict(list)    
    total = [defaultdict(int) for x in range(segments)]
    # NOTE(review): `starts` is assigned but never used in this function.
    starts = {}
    data = json.load(open("data/times/all.json", "r"))
    duds = 0
    co = 0
    for runner in data:
        #print runner["bib number"], runner["5K"]
        #see if he/she showed up
        if "5K" not in runner or not runner["5K"][1]:
            duds += 1
            continue
        co += 1
        # Progress indicator every 100 runners.
        if co % 100 == 0:
            print co
        #placement will represent which marker he/she was closest to at each interval
        placement = ["0" for x in range(segments)]
        #stamps is the timestamps scraped from BAA.org
        stamps = [runner[x][1] for x in posts]
        marker = 0

        #fill in placement with most recent split time (intervals of 5K + half and finish)
        for c in range(segments):
            if c > 0:
                placement[c] = placement[c - 1]
            if marker < len(posts) and stamps[marker] and stamps[marker] < c * INTERVAL:
                placement[c] = posts[marker]
                marker += 1

        # Convert marker names to integer kilometer values
        # ("HALF" -> 21, "Finish Net" -> 42).
        placement = [int(x.replace("K", "").replace("Finish Net", "42").replace("HALF", "21")) for x in placement]
        #print placement
        #print runner["bib number"]
        
        #calculate interpolations between kilometer marks

        #start at appropriate place for offset in starting point
        c = int(round(runner["0K"] / INTERVAL))
        while c < len(placement):
            if placement[c] == placement[-1] or c >= len(placement) - 2:
                break
            t = 1
            # t = number of consecutive intervals with no new split recorded.
            while c+t < len(placement) and placement[c + t] == placement[c]:
                t += 1
            #print c, t, placement[c+t], placement[c]
            # Linear interpolation across the flat stretch.
            step = float(placement[c+t]-placement[c]) / t
            for i in range(1, t):
                placement[c + i] = int(math.floor(placement[c + i] + i * step))
            c += t

        #print placement
        # Runners with identical placement paths share one roster key.
        key = "_".join([str(x) for x in placement])
        roster[key].append(runner["bib number"])

        for c in range(segments):
            total[c][placement[c]] += 1
        

    write(json.dumps(roster, indent=2), "times/condensed.json")
    write(json.dumps(total, indent=2), "times/condensed_time.json")
开发者ID:wilson428,项目名称:boston_marathon,代码行数:60,代码来源:analyze.py

示例10: main

def main():
    """Command-line entry point for the Twitter petition-link scraper.

    Parses CLI options, runs the search, then records run metadata in the
    global `scrapelog` and writes it to a timestamped log file.
    """
    parser = argparse.ArgumentParser(description="Retrieve petitions from We The People")
    # Fixed: the original help text claimed "default is 10" while the actual
    # default is None (no page limit).
    parser.add_argument("-m", "--max", metavar="INTEGER", dest="max", type=int, default=None,
                        help="maximum pages of petitions to retrieve, 100 per page, default is no limit")
    parser.add_argument("-s", "--start", metavar="INTEGER", dest="start", type=int, default=1,
                        help="starting page, 100 per page, default is 1")
    parser.add_argument("-q", "--query", metavar="STRING", dest="query", type=str, default="whitehouse+petition",
                        help="The query for searching twitter for petition links, default is 'whitehouse+petition'")
    args = parser.parse_args()

    if args.max is not None and args.max < 1:
        # Fixed grammar ("one pages" -> "one page") in the original message.
        parser.error("How can I scrape less than one page of twitter results? You make no sense! --max must be one or greater.")

    if args.start < 1:
        parser.error("--start must be one or greater.")

    if not len(sys.argv) > 1:
        log('Running with default values. Use --h to see options.')

    search(args.query, args.start, args.max)

    # Record scrape metadata and persist the run log.
    scrapelog["query"] = args.query
    scrapelog["end"] = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    write(json.dumps(scrapelog, indent=2), "log-tw-" + scrapelog["begin"] + ".json", log_dir())
    log("Done. Found total %i petitions" % (len(scrapelog["signatures"])))
开发者ID:imclab,项目名称:petitions,代码行数:26,代码来源:twitter.py

示例11: do

    def do(self):
        """Create a git tag for the stored revision — optionally GPG-signed
        or annotated with a message — then broadcast the command's outcome
        and refresh the model's status on success."""
        log_msg = 'Tagging: "%s" as "%s"' % (self._revision, self._name)
        opts = {}
        if self._message:
            # Pass the tag message via a temp file (git tag -F <file>).
            opts['F'] = utils.tmp_filename('tag-message')
            utils.write(opts['F'], self._message)

        if self._sign:
            log_msg += ', GPG-signed'
            opts['s'] = True
        else:
            # Annotated tag (-a) only when a message was supplied.
            opts['a'] = bool(self._message)

        # Both branches of the original issued the same call; run it once.
        status, output = self.model.git.tag(self._name,
                                            self._revision,
                                            with_status=True,
                                            with_stderr=True,
                                            **opts)

        if 'F' in opts:
            os.unlink(opts['F'])

        if output:
            log_msg += '\nOutput:\n%s' % output

        _notifier.broadcast(signals.log_cmd, status, log_msg)
        if status == 0:
            self.model.update_status()
开发者ID:dannyfeng,项目名称:gitGUI,代码行数:31,代码来源:cmds.py

示例12: fetch_version

def fetch_version(bill_version_id, options):
  """Gather metadata for one bill text version from its FDsys MODS file
  and write it out as JSON.

  Returns a status dict on success.
  """
  logging.info("\n[%s] Fetching..." % bill_version_id)

  bill_type, number, congress, version_code = utils.split_bill_version_id(bill_version_id)

  mods_filename = filename_for(bill_version_id)
  mods_cache = version_cache_for(bill_version_id, "mods.xml")
  issued_on, urls = fdsys.document_info_for(mods_filename, mods_cache, options)

  bill_version = {
    'bill_version_id': bill_version_id,
    'version_code': version_code,
    'issued_on': issued_on,
    'urls': urls,
  }

  utils.write(
    json.dumps(bill_version, sort_keys=True, indent=2, default=utils.format_datetime),
    output_for_bill_version(bill_version_id)
  )

  return {'ok': True, 'saved': True}
开发者ID:ArlingtonHouse,项目名称:congress,代码行数:26,代码来源:bill_versions.py

示例13: main

def main():
    """Command-line entry point: scrape We The People petitions, then
    record the run's end time in the global `scrapelog` and write it to a
    timestamped log file."""
    parser = argparse.ArgumentParser(description="Retrieve petitions from We The People")
    parser.add_argument("-m", "--max", metavar="INTEGER", dest="max", type=int,
                        default=None, help="maximum number of petitions to retrieve")
    parser.add_argument("-s", "--start", metavar="INTEGER", dest="start", type=int,
                        default=1, help="starting page, 20 per page, default is 1")
    args = parser.parse_args()

    if args.max is not None and args.max < 1:
        parser.error("How can I scrape less than one petition? You make no sense! --max must be one or greater.")

    if args.start < 1:
        parser.error("--start must be one or greater.")

    log("Found %i petitions" % (petitions(args.start, args.max)))

    # write log
    scrapelog["end"] = datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    write(json.dumps(scrapelog, indent=2), "log-wh-" + scrapelog["begin"] + ".json", log_dir())
开发者ID:pallih,项目名称:petitions,代码行数:33,代码来源:petitions.py

示例14: process

def process(inFile, outFile, targets, algo):
    """Track *targets* through the video *inFile* with tracker *algo*,
    writing an annotated <outFile>.avi and passing the collected per-frame
    target rectangles (normalized by frame size) to utils.write as
    <outFile>.txt.

    NOTE(review): the same `algo` instance is appended once per entry in
    *targets* (and later re-bound by the inner `for algo in algorithms`
    loop), so true multi-target tracking likely needs one tracker instance
    per target — confirm intent with the author.
    NOTE(review): if the first frame cannot be read, `image` is None and
    `image.shape` below raises; assumes a readable video file.
    """
    capture = cv2.VideoCapture(inFile)
    retval, image = capture.read()
    locations = []
    if retval:
        # VideoWriter frame size is (width, height), hence the reversed slice.
        writer = cv2.VideoWriter(outFile + ".avi", 
            fps=25,
            fourcc=cv2.cv.CV_FOURCC(*"DIVX"),
            frameSize=image.shape[0:2][::-1])
        algorithms = []
        for x in targets:
            algo.start(image, x)
            algorithms.append(algo)
            utils.drawTarget(image, algo.target)
        writer.write(image)

    w,h = image.shape[:2]
    while retval:       
        retval, image = capture.read()
        # Normalize the current target rect to [0, 1] by frame dimensions.
        target = np.array(algo.target) / np.array([h, w, h, w], dtype=np.float32)
        locations.append(target)
        if retval:
            for algo in algorithms:
                algo.next(image)
                # Blue box when the tracker lost the target, green when valid.
                color = (255, 0, 0)
                if algo.valid:
                    color = (0, 255, 0)
                utils.drawTarget(image, algo.target, color)
            writer.write(image)

    utils.write(outFile + ".txt", inFile, locations)
开发者ID:snuderl,项目名称:VideoTracking,代码行数:31,代码来源:video.py

示例15: fetch_votes

def fetch_votes(session, rootdir):
    """Download every roll-call vote XML for *session* from GovTrack and
    write each vote as JSON under rootdir/data/json/<chamber>/<session>/.

    Python 2 code (print statements, `except Exception, e` syntax).
    """
    #get list of all votes from session from GovTrack
    votes = parse("http://www.govtrack.us/data/us/%s/rolls/" % session)

    for vote in [x for x in votes.xpath("//a/@href") if x[-4:] == ".xml"]:
        # Filenames starting with 'h' are House votes; everything else Senate.
        chamber = "house" if vote[0] == 'h' else "senate"
        url = "http://www.govtrack.us/data/us/%s/rolls/%s" % (session, vote)
        doc = download(url, session + "/" + vote)
        # Escape bare ampersands so the document parses as XML.
        doc = doc.replace("&", "&amp;")
        try:
            markup = lxml.objectify.fromstring(doc)
        except Exception, e:
            print "Couldn't read", url
            print e
            continue
        data = {}
        data["rollcall"] = {}
        #walk through xml and collect key/value pairs
        for el in markup.getiterator():
            if el.attrib == {}:
                data[el.tag] = el.text
            elif el.tag == 'voter':
                # <voter id=... value=...> entries become rollcall rows.
                data["rollcall"][el.attrib["id"]] = el.attrib["value"]
        print rootdir + "/data/json/%s/%s/%s.json" % (chamber, session, vote[:-4])
                
        write(json.dumps(data, indent=2), rootdir + "/data/json/%s/%s/%s.json" % (chamber, session, vote[:-4]))
开发者ID:cmiller8,项目名称:force_talk,代码行数:26,代码来源:fetch.py


注:本文中的utils.write函数示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。