

Python simplejson.load Function Code Examples

This article collects typical usage examples of the simplejson.load function in Python. If you have been wondering how to use the load function, what it does, or what it looks like in practice, the curated examples below may help.


The following presents 15 code examples of the load function, sorted by popularity by default.
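Before the examples, a quick orientation: simplejson.load(fp) parses JSON from any file-like object that exposes a .read() method and returns the corresponding Python object (dict, list, etc.), which is why the snippets below pass it open files, HTTP responses, and even tarfile members. The minimal sketch below shows the two most common patterns; the file name and URL are placeholders, not values from any example on this page.

import simplejson

# Pattern 1: parse JSON from a local file object.
with open('config.json') as f:                # placeholder file name
    data = simplejson.load(f)                 # returns dicts/lists, like json.load

# Pattern 2: parse JSON straight from a file-like HTTP response
# (urllib2 on Python 2, as in most examples below; urllib.request on Python 3).
try:
    from urllib2 import urlopen               # Python 2
except ImportError:
    from urllib.request import urlopen        # Python 3

response = urlopen('https://api.example.com/data.json')  # placeholder URL
data = simplejson.load(response)              # works on anything with .read()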

Example 1: POST

	def POST(self, id=None, outputs=None, **kwargs):
		if id is None:
			metadata = self.parse_json_dict(simplejson.load(cherrypy.request.body))
			for inp in metadata.get("inputs",[]):
				if '_id' in inp: inp['_id'] = self.parse_value(inp['_id'])

			id = Transformations.insert_one(metadata).inserted_id

			# for inp in metadata.get("inputs",[]):
			# 	Products.update_one({'_id':inp['_id']},{'$push':{'transformations':{'_id':id}}})

			return self.build_response('ok', **dict(transformation={'_id':id}))

		else:
			if outputs == 'outputs':
				metadata = self.parse_json_dict(simplejson.load(cherrypy.request.body))
				resp = simplejson.loads(ProductsController().POST(id=None, metadata=metadata))
				if resp['status'] != 'ok':
					return self.build_response('error',
						message='error creating output product: %s' % resp['message'])
				Transformations.update_one({'_id':ObjectId(id)},{'$push':{'outputs':{'_id':ObjectId(resp['data']['product']['_id'])}}})
				return self.build_response('ok', **dict(product=resp['data']['product']))

			else:
				return self.build_response('error',message='cannot POST metadata for an existing transformation. Use PUT instead.')
Author: dbenders, Project: radstore, Lines: 25, Source: api.py

Example 2: index

def index(request):
    conn = urlopen( settings.RUNMODO_SOLR_URL + '/select?group=true&group.field=eventName&sort=raceStart%20desc&fq=raceStart:[*%20TO%20NOW]&rows=5')
    results = simplejson.load(conn)
    conn = urlopen( settings.RUNMODO_SOLR_URL + '/select?group=true&group.field=eventName&sort=raceStart%20asc&fq=raceStart:[NOW%20TO%20*]&rows=5')
    events = simplejson.load(conn)

    return render_to_response('runapp/index2.html', {'results': results['grouped']['eventName']['groups'], 'events': events['grouped']['eventName']['groups'], 'view':'index'}, context_instance=RequestContext(request) )
Author: solaise73, Project: runmodo, Lines: 7, Source: views.py

Example 3: generate

def generate(mapping, schema, filename):
  mapping = json.load(open(mapping))
  schema = json.load(open(schema))
  schema.pop('$schema', 0)

  # Index for which keys need to be marked as having exact counterparts
  exact_index = get_exact_fields(mapping)
  # Index to lookup a non-array version of key name (so it maps to ES)
  es_items_index = generate_es_to_schema_index(schema)
  # All the fields to process from the schema
  schema_fields = get_schema_fields(schema)

  for field in schema_fields:
    type_field = field + '.' + 'type'
    # No need to expand these complex types with DEFAULT attributes
    if get_deep(schema, type_field) in ['object', 'array']:
      continue
    if not field:
      continue

    es_field = es_items_index.get(field, None)
    is_exact = True if es_field in exact_index else False

    # Expand each of the keys to match DEFAULTS.
    # set_deep() will not overwrite data if it does exist, but will add if it
    # does not
    for key, value in DEFAULTS.items():
      full_key = field + '.' + key
      if is_exact and key == 'is_exact':
        value = True

      set_deep(schema, full_key, value)

  make_yaml(schema, filename)
Author: FDA, Project: openfda, Lines: 34, Source: generate_fields_yaml.py

Example 4: run_compare

def run_compare():
    with open('url-content-read.json') as f:
        readability_text = simplejson.load(f)

    with open('url-content-fetcher.json') as f:
        fetcher_text = simplejson.load(f)

    cnt = 0
    z_cnt = 0
    dmp = diff_match_patch()
    rets = []
    for key, value in readability_text.items():
        if key in fetcher_text:
            cnt += 1
            rc = re.sub(r' ', '', value)
            fc = re.sub(r' ', '', fetcher_text[key])
            l_len = len(rc)
            r_len = len(fc)
            retval = dif_content(rc, fc)
            retval_ground = 0
            results = dmp.diff_main(rc, fc)
            for res in results:
                if res[0] == 0:
                    retval_ground += len(res[1])
            print cnt, ': ', l_len, r_len, retval, retval_ground
            real_ret = max(retval, retval_ground)
            rets.append((cnt, l_len, r_len, real_ret))

    with open('diff_result_1', 'w') as f:
        for res in rets:
            print >> f, res[0], ': ', res[1], res[2], res[3]
Author: xtang, Project: PyDiff, Lines: 31, Source: run_regression.py

Example 5: _resume

def _resume(results_dir):
    """Loads a partially completed json results directory."""
    # Pylint can't infer that the json being loaded is a dict
    # pylint: disable=maybe-no-member
    assert os.path.isdir(results_dir), \
        "TestrunResult.resume() requires a directory"

    # Load the metadata
    with open(os.path.join(results_dir, 'metadata.json'), 'r') as f:
        meta = json.load(f)
    assert meta['results_version'] == CURRENT_JSON_VERSION, \
        "Old results version, resume impossible"

    meta['tests'] = collections.OrderedDict()

    # Load all of the test names and add them to the test list
    tests_dir = os.path.join(results_dir, 'tests')
    file_list = sorted(
        (l for l in os.listdir(tests_dir) if l.endswith('.json')),
        key=lambda p: int(os.path.splitext(p)[0]))

    for file_ in file_list:
        with open(os.path.join(tests_dir, file_), 'r') as f:
            try:
                meta['tests'].update(json.load(f))
            except ValueError:
                continue

    return results.TestrunResult.from_dict(meta)
Author: dumbbell, Project: piglit, Lines: 29, Source: json.py

Example 6: read_config

    def read_config(self):
        try:
            logger.info("going to read the sites config file ")
            with open(NewsCrawlerConfig.SITE_FILE, "rbU") as config:
                try:
                    logger.info("going to json load the sites config")
                    json.load(config, object_hook = self.as_newscrawler)
                    self.is_config_read = True
                except Exception as load_e:
                    logger.error(load_e)
                    raise load_e
        except IOError as file_e:
            logger.error(file_e)
            raise file_e

        try:
            logger.info("going to read the crawler configurations")
            with open(NewsCrawlerConfig.CRAWL_FILE, "rbU") as config:
                try:
                    logger.info("going to json load the crawler configs")
                    json.load(config, object_hook = self.as_crawloptions)
                except Exception as load_e:
                    logger.error(load_e)
                    raise load_e
        except IOError as file_e:
            logger.error(file_e)
            raise file_e
Author: subhadeepmaji, Project: ml_algorithms, Lines: 27, Source: NewsCrawler.py

Example 7: query

    def query(self, object_id=None,type=None,connection_type=None, metadata=False):
      response = {}
      import urllib
      import simplejson
      if type == 'fb':
        url = 'https://graph.facebook.com/%s' % (object_id)
        if connection_type:
            url += '/%s' % (connection_type)

        params = {'access_token': self.access_token}
        if metadata:
            params['metadata'] = 1

        url += '?' + urllib.urlencode(params)
        response = simplejson.load(urllib.urlopen(url))
        if 'error' in response:
            error = response['error']
            raise FacebookSessionError(error['type'], error['message'])
      if type == 'github':
        url = 'https://api.github.com/user'
        params  = {'access_token': self.access_token}
        url += '?' + urllib.urlencode(params)
        print "DAY LA URL GITHUB %s " % url
        response = simplejson.load(urllib.urlopen(url))
      return response
Author: cliffkimani, Project: django-football-club-manager, Lines: 25, Source: models.py

Example 8: __init__

	def __init__(self, authtype = "OAuth", username = None, password = None, oauth_keys = None, headers = None):
		self.authtype = authtype
		self.authenticated = False
		self.username = username
		self.password = password
		self.oauth_keys = oauth_keys
		if self.username is not None and self.password is not None:
			if self.authtype == "OAuth":
				self.request_token_url = 'https://twitter.com/oauth/request_token'
				self.access_token_url = 'https://twitter.com/oauth/access_token'
				self.authorization_url = 'http://twitter.com/oauth/authorize'
				self.signin_url = 'http://twitter.com/oauth/authenticate'
				# Do OAuth type stuff here - how should this be handled? Seems like a framework question...
			elif self.authtype == "Basic":
				self.auth_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
				self.auth_manager.add_password(None, "http://twitter.com", self.username, self.password)
				self.handler = urllib2.HTTPBasicAuthHandler(self.auth_manager)
				self.opener = urllib2.build_opener(self.handler)
				if headers is not None:
					self.opener.addheaders = [('User-agent', headers)]
				try:
					simplejson.load(self.opener.open("http://twitter.com/account/verify_credentials.json"))
					self.authenticated = True
				except HTTPError, e:
					raise TangoError("Authentication failed with your provided credentials. Try again? (%s failure)" % `e.code`, e.code)
Author: altf, Project: tango, Lines: 25, Source: tango.py

Example 9: __init__

    def __init__(self, resultfile=None):
        self.serialized_keys = ['options',
                                'name',
                                'tests',
                                'uname',
                                'wglinfo',
                                'glxinfo',
                                'lspci',
                                'time_elapsed']
        self.name = None
        self.uname = None
        self.options = None
        self.glxinfo = None
        self.lspci = None
        self.time_elapsed = None
        self.tests = {}

        if resultfile:
            # Attempt to open the json file normally, if it fails then attempt
            # to repair it.
            try:
                raw_dict = json.load(resultfile)
            except ValueError:
                raw_dict = json.load(self.__repairFile(resultfile))

            # Check that only expected keys were unserialized.
            for key in raw_dict:
                if key not in self.serialized_keys:
                    raise Exception('unexpected key in results file: ', str(key))

            self.__dict__.update(raw_dict)

            # Replace each raw dict in self.tests with a TestResult.
            for (path, result) in self.tests.items():
                self.tests[path] = TestResult(result)
Author: ThirteenFish, Project: piglit, Lines: 35, Source: core.py

Example 10: BestMatch

def BestMatch(sname, api_code):

    url = "https://opencorporates.com/reconcile/?query="+urllib.quote(sname)
    entities = simplejson.load(urllib.urlopen(url))

    try:
        bm = entities['result'][0]
        url = "https://api.opencorporates.com" + bm['id'] + '?api_token='+ api_code

        match = simplejson.load(urllib.urlopen(url))
        m = match['results']['company']

        if m['controlling_entity'] is None:
            cen = ''
            cec = ''
            ceu = ''
        else:
            cen = m['controlling_entity']['name']
            cec = m['controlling_entity']['jurisdiction_code']
            ceu = m['controlling_entity']['opencorporates_url']

        print sname, m['name'], m['jurisdiction_code'], m['company_number'], m['corporate_groupings'], m['agent_name'], m['agent_address'], m['alternative_names'], m['previous_names'], m['home_company'], cen, cec, ceu, m['inactive'], bm['score'], bm['match'], bm['uri'], m['registry_url']
        reconciled.writerow([sname, m['name'].encode('utf-8'), m['jurisdiction_code'], m['company_number'], m['corporate_groupings'], m['agent_name'], m['agent_address'], m['alternative_names'], m['previous_names'], m['home_company'], cen, cec, ceu, m['inactive'], bm['score'], bm['match'], bm['uri'], m['registry_url']])
        
        return match['results']['company']['name']

    except IndexError:
        reconciled.writerow([sname, 'nomatch'])

        return "nomatch"
Author: zufanka, Project: pemex-tenders, Lines: 31, Source: reconcile.py

Example 11: tz

def tz(phenny, input):
	"""Displays time in different time zones"""
	with open('nickloc.csv', 'rU') as f:
		z = csv.reader(f)
		nickdict = {}
		for key, val in z:
			nickdict[key] = val
	try:
		locinput = input.group(2)
		locinput1 = locinput.strip().lower()
		if locinput1 in nickdict:
			htmlinput = urllib.quote(nickdict[locinput1])
		else:
			htmlinput = urllib.quote(locinput1)
		url2 = 'http://nominatim.openstreetmap.org/search?q=' + htmlinput + '&format=json'
		jsonResponse = simplejson.load(urllib.urlopen(url2))
		lati = jsonResponse[0]['lat']
		longi = jsonResponse[0]['lon']
		loca = jsonResponse[0]['display_name']
		url3 = 'http://api.geonames.org/timezoneJSON?lat=' + lati + '&lng=' + longi + '&username=jinpark'
		jsonResponse1 = simplejson.load(urllib.urlopen(url3))
		if jsonResponse1['dstOffset'] == 0:
			timezone = ''
		elif jsonResponse1['dstOffset'] > 0:
			timezone = '+' + str(jsonResponse1['dstOffset'])
		else:
			timezone = str(jsonResponse1['dstOffset'])
		phennyout = loca + ": " + str(jsonResponse1['time']) + ' UTC' + timezone
		phenny.say(phennyout)
	except:
		phenny.say('Something went wrong')
Author: jinp6301, Project: phenny-bot, Lines: 31, Source: tz.py

Example 12: search

def search(query, resultnum=None, lang=None, since_id=None):
    output = []
    
    q = []
    rpp = 100
    q.append(urlencode({'q': query}))
    if since_id is not None:
        q.append(urlencode({'since_id': since_id}))
    if lang is not None:
        q.append(urlencode({'lang': lang}))
    if resultnum is not None:
        rpp = resultnum
    q.append(urlencode({'rpp': rpp}))
    baseurl = 'http://search.twitter.com/search.json'
    
    
    url = baseurl + '?' + '&'.join(q)
    print url
       
    response = urllib2.urlopen(url)
    data = simplejson.load(response)
    output.append(data)
    
    while 'next_page' in data:
        url = baseurl + data['next_page']
        print url
        response = urllib2.urlopen(url)
        data = simplejson.load(response)
        output.append(data)
    
    return output
Author: diegocaro, Project: opinionapp, Lines: 31, Source: searchtwitter.py

Example 13: apply_array_location_correction

def apply_array_location_correction(phot_groups_filepath):

    """
	Reads the phot_groups.json files and applies the array
	location correction to all measurements of ALL sources.
	Writes the result to disk in work_dir as
	'phot_groups_arrayloc.json'
	"""

    work_dir = "/".join(phot_groups_filepath.split("/")[:-1])
    meta = json.load(open(work_dir + "/metadata.json"))

    # read in the array location correction values
    root = os.path.abspath("../bcdphot")
    if meta["channel"] is "1":
        fp = os.path.join(root, "cal", "ch1_photcorr_ap_5.fits")
    elif meta["channel"] is "2":
        fp = os.path.join(root, "cal", "ch2_photcorr_ap_5.fits")
    arrloc = pyfits.open(fp)[0].data

    # read in the photometry JSON files
    ch = json.load(open(phot_groups_filepath))

    # apply correction
    for key in ch:
        for obs in ch[key]:
            x, y = obs[4:6]
            obs[6:] = [i * arrloc[x, y] for i in obs[6:]]

    # write to disk
    out_path = work_dir + "/phot_groups_arrayloc.json"
    with open(out_path, "w") as w:
        json.dump(ch, w, indent=4 * " ")
    print("created file: " + out_path)
Author: john-livingston, Project: bcdphot, Lines: 34, Source: phot.py

Example 14: get_manifest

def get_manifest(tarfile_path):
    """Extract and return manifest in tarball.

    Given a path to a tarfile, which is itself the product of "docker save",
    this discovers the manifest for the collection of images, which provides
    version information.
    """
    manifest = []
    configjson = None
    with contextlib.closing(tarfile.open(tarfile_path)) as archive:
        for member in archive.getmembers():
            # find the "manifest.json" file, which points to metadata json file
            if os.path.basename(member.path) == 'manifest.json':
                initial_manifest = json.load(archive.extractfile(member))
                configjson = initial_manifest[0]['Config']
        for member in archive.getmembers():
            # get manifest from shasum json file, docker ver > 1.10
            if configjson and os.path.basename(member.path) == configjson:
                image_data = json.load(archive.extractfile(member))
                manifest.append(image_data)
            # find the "json" files, which contain all image metadata
            # legacy code for docker ver < 1.10
            elif not configjson and os.path.basename(member.path) == 'json':
                image_data = json.load(archive.extractfile(member))
                manifest.append(image_data)
    return manifest
Author: pbabinca, Project: dockpulp, Lines: 26, Source: imgutils.py

Example 15: getPages

def getPages():
    """
    For each page of the diary: get the json and save in ./Diaries/diaryID/pageID.json
  """
    path = "Diaries/"
    urlBase = "http://transcripts.sl.nsw.gov.au/api/node/"
    errorsList = []
    for diaryUrl in diariesUrls:
        print diaryUrl
        response = urllib2.urlopen(diaryUrl)
        # print response.info()
        myjson = simplejson.load(response)
        pagesList = myjson["field_transcript_pages"]["und"]
        ## Create folder if it doesn't exist
        directory = path + diaryUrl[42:-5]
        if not os.path.exists(directory):
            os.makedirs(directory)
        ## Get each page in json
        for page in pagesList:
            print page["nid"]
            mypage = urlBase + page["nid"] + ".json"
            try:
                response = urllib2.urlopen(mypage)
                # print response.info()
                myjson = simplejson.load(response)
                with open(directory + "/" + page["nid"] + ".json", "w") as page_file:
                    simplejson.dump(myjson, page_file)  # write valid JSON, not the Python repr of a dict
            except urllib2.HTTPError, e:
                print "We failed with error code - %s." % e.code
                print mypage
                print "$$$$$$$$$$$$"
                errorsList.append("diary:" + diaryUrl[42:-5] + ": page " + mypage)
Author: TheTypoMaster, Project: crossreads, Lines: 33, Source: Get-pagesURLs-from-DiariesURL-WITH-unicode.py


Note: The simplejson.load function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.