This article collects typical usage examples of the simplejson.dump function in Python. If you are wondering how dump is actually called, what its arguments look like in practice, or simply want to see real-world code that uses it, the hand-picked examples below should help.
Fifteen code examples of the dump function are shown below, sorted by popularity by default.
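Before the examples, here is a minimal sketch of the basic call shape (the file name and data below are made up for illustration): simplejson.dump serialises a Python object to an open file-like object, and simplejson.load reads it back.

import simplejson

settings = {"name": "example", "retries": 3, "tags": ["a", "b"]}  # illustrative data

# Write the object to a file; dump() expects a target with a .write() method.
with open("settings.json", "w") as fp:
    simplejson.dump(settings, fp, indent=2, sort_keys=True)

# Read it back to confirm the round trip.
with open("settings.json") as fp:
    assert simplejson.load(fp) == settings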
Example 1: update_notified
def update_notified(jobs):
    with open(filename, 'r') as f:
        data = simplejson.load(f)
    for job in jobs:
        data['jobs'][job]['notified'] = True
    with open(filename, 'w') as f:
        simplejson.dump(data, f)
Example 2: run
def run(self, options, **kwargs):
    config_file = os.path.join(options.app_path, "wsgid.json")
    f = self._open_config_file(config_file)
    s = f.read()
    cfg_values = {}
    if s:
        cfg_values = simplejson.loads(s)

    # Copy the values
    self._override_if_not_none("wsgi_app", cfg_values, options.wsgi_app)
    self._override_if_not_none("debug", cfg_values, options.debug)
    if options.workers > 1:
        self._override_if_not_none("workers", cfg_values, options.workers, convert_func=int)
    self._override_if_not_none("keep_alive", cfg_values, options.keep_alive)
    self._override_if_not_none("chroot", cfg_values, options.chroot)
    self._override_if_not_none("no_daemon", cfg_values, options.no_daemon)
    self._override_if_not_none("recv", cfg_values, options.recv)
    self._override_if_not_none("send", cfg_values, options.send)
    self._override_if_not_none("mongrel2_chroot", cfg_values, options.mongrel2_chroot)

    # Custom config command options
    if options.no_debug:
        cfg_values["debug"] = str((not options.no_debug))

    # Rewrite the config file
    f.seek(0)
    f.truncate()
    simplejson.dump(cfg_values, f, indent=" ")
    f.close()
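A note on the indent argument used above: besides a non-negative integer (a number of spaces), recent versions of simplejson also accept a string as the indentation unit, which is what the example relies on. A small illustrative sketch (the config values are made up):

import simplejson

cfg = {"debug": True, "workers": 4}  # illustrative values

print(simplejson.dumps(cfg, indent=2))     # two-space indentation
print(simplejson.dumps(cfg, indent="\t"))  # tab indentation via a string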
Example 3: sync
def sync(self):
    """Synchronise and update the stored state to the in-memory state."""
    if self.filepath:
        with open(self.filepath, "w") as serialised_file:
            simplejson.dump(self.state, serialised_file)
    else:
        print "Filepath to the persistence file is not set. State cannot be synced to disc."
Example 4: generate_top
def generate_top():
    from collections import defaultdict
    import simplejson as json
    import operator

    from_product = defaultdict(lambda: 0)
    results = defaultdict(lambda: 0)
    for product in cols['product'].find().sort('_id', -1):
        for k in product.keys():
            from_product[k] += 1
    product_keys = dict(from_product)
    for w in list(product_keys.keys()):
        jieba.add_word(w, tag='nz')

    progress = 0
    for comment in cols['mobile_comment'].find(projection={'content': 1}):
        c = comment['content']
        words = jieba.analyse.extract_tags(c, topK=20, withWeight=False, allowPOS=('ns', 'n', 'nz'))
        for w in words:
            results[w] += 1
        progress += 1
        if progress % 100 == 0:
            print('Current Progress: ', progress)

    sorted_x = reversed(sorted(dict(results).items(), key=operator.itemgetter(1)))
    json.dump(
        list(sorted_x),
        open('sorted_mobile.json', mode='w', encoding='utf-8'),
        ensure_ascii=False,
        indent=2
    )
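The ensure_ascii=False flag in this example keeps non-ASCII text (here, the Chinese tags extracted by jieba) readable in the output file instead of escaping it as \uXXXX sequences, so the file needs to be opened with a suitable encoding. A minimal sketch with made-up data:

import simplejson as json

data = {"标签": "手机"}  # illustrative non-ASCII content

with open("tags.json", "w", encoding="utf-8") as fp:
    json.dump(data, fp, ensure_ascii=False, indent=2)

# With the default ensure_ascii=True, the same data would instead be written
# as escaped sequences such as "\u6807\u7b7e".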
Example 5: test_tuple_array_dump
def test_tuple_array_dump(self):
    t = (1, 2, 3)
    expect = json.dumps(list(t))
    # Default is True
    sio = StringIO()
    json.dump(t, sio)
    self.assertEqual(expect, sio.getvalue())
    sio = StringIO()
    json.dump(t, sio, tuple_as_array=True)
    self.assertEqual(expect, sio.getvalue())
    self.assertRaises(TypeError, json.dump, t, StringIO(),
                      tuple_as_array=False)
    # Ensure that the "default" does not get called
    sio = StringIO()
    json.dump(t, sio, default=repr)
    self.assertEqual(expect, sio.getvalue())
    sio = StringIO()
    json.dump(t, sio, tuple_as_array=True, default=repr)
    self.assertEqual(expect, sio.getvalue())
    # Ensure that the "default" gets called
    sio = StringIO()
    json.dump(t, sio, tuple_as_array=False, default=repr)
    self.assertEqual(
        json.dumps(repr(t)),
        sio.getvalue())
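Two simplejson-specific options are exercised by this test: tuple_as_array (True by default) decides whether tuples are encoded as JSON arrays, and default is a fallback callable invoked only for values the encoder cannot serialise on its own. A short illustrative recap:

import simplejson as json

t = (1, 2, 3)

json.dumps(t)                                      # '[1, 2, 3]'
json.dumps(t, tuple_as_array=False, default=repr)  # '"(1, 2, 3)"'
# Without a default, disabling tuple_as_array leaves the tuple unserialisable:
# json.dumps(t, tuple_as_array=False)  -> raises TypeError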
Example 6: editfile
def editfile(fn, password):
    filetype = aespckfile
    if ".json" in fn:
        filetype = aesjsonfile
    db = filetype.load(fn, password)
    f = tempfile.NamedTemporaryFile()
    json.dump(db, f, indent=2)
    f.flush()
    mtime = os.path.getmtime(f.name)
    while True:
        subprocess.call([os.getenv("EDITOR") or "editor", f.name])
        if os.path.getmtime(f.name) == mtime:
            print "Not updated"
            break
        try:
            f.seek(0)
            db = json.load(f)
            filetype.dump(fn, db, password)
            break
        except Exception, e:
            print "Error in json"
            print e
            print "Try again (y/n)? ",
            input = raw_input()
            if not input.lower().startswith("y"):
                break
Example 7: main
def main(argv):
    data, lang = argv[-2:]
    f = myStore.load(data)
    lang = f.newSymbol(lang)

    it = {
        "rules": asGrammar(f, lang),
        "tokens": tokens(f, lang),
        "first": sets(f, lang, EBNF.first),
        "follow": sets(f, lang, EBNF.follow),
    }

    if "--pprint" in argv:
        from pprint import pprint
        pprint(it)
    elif "--yacc" in argv:
        toYacc(it)
    elif "--ply" in argv:
        toPly(it)
    else:
        import simplejson  # http://cheeseshop.python.org/pypi/simplejson
        import sys
        start = it["rules"][0][0]
        print "SYNTAX_%s = " % start,
        simplejson.dump(it, sys.stdout)
Example 8: outputDashboard
def outputDashboard(self):
    log.debug("Creating dashboard")
    dirname = self.config.get('main', 'dashboard_dir')
    if not os.path.exists(dirname):
        # Copy in the rest of html
        shutil.copytree('html/dashboard', dirname)
        shutil.copytree('html/flot', '%s/flot' % dirname)
        shutil.copytree('html/jquery', '%s/jquery' % dirname)

    filename = os.path.join(dirname, 'testdata.js')
    fp = open(filename + ".tmp", "w")
    now = time.asctime()
    fp.write("// Generated at %s\n" % now)
    fp.write("gFetchTime = ")
    json.dump(now, fp, separators=(',', ':'))
    fp.write(";\n")
    fp.write("var gData = ")
    # Hackity hack
    # Don't pretend we have double precision here
    # 8 digits of precision is plenty
    try:
        json.encoder.FLOAT_REPR = lambda f: "%.8g" % f
    except:
        pass
    json.dump(self.dashboard_data, fp, separators=(',', ':'), sort_keys=True)
    try:
        json.encoder.FLOAT_REPR = repr
    except:
        pass
    fp.write(";\n")
    fp.close()
    os.rename(filename + ".tmp", filename)
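The separators=(',', ':') argument above strips the spaces that dump would otherwise emit after commas and colons, keeping the generated testdata.js compact. For example (illustrative data):

import simplejson as json

row = {"a": 1, "b": [1, 2]}
json.dumps(row)                         # '{"a": 1, "b": [1, 2]}'
json.dumps(row, separators=(',', ':'))  # '{"a":1,"b":[1,2]}'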
Example 9: write
def write(self, filename, auto=False, data={}):
    """
    Write the graph to scene file.

    :param str filename: file to save.
    :param bool auto: file is an autosave, don't set it as the current scene.
    :param dict data: dictionary of graph data.

    :returns: current scene name.
    :rtype: str
    """
    # callbacks
    self.graphAboutToBeSaved()

    if not data:
        data = self.snapshot()

    fn = open(filename, 'w')
    json.dump(data, fn, indent=4)
    fn.close()

    if auto:
        self._autosave_file = filename
        # don't set the current autosave filename as the scene, use the parent filename
        filename = filename[:-1]

    # callbacks
    self.graphSaved()
    return self.setScene(filename)
Example 10: main
def main(inputPtnPath, outputPath):
    start_time = datetime.now()

    model, table = projizz.readPrefixTreeModelWithTable("./yagoPatternTree.model", "./yagoPatternTree.table")
    properties = projizz.buildYagoProperties({})

    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    t = 0
    result = []
    for filename in os.listdir(inputPtnPath):
        if ".json" in filename:
            result.append(pool.apply_async(filterFunction, (t, filename, inputPtnPath, model, table, copy.deepcopy(properties))))
            t += 1
    pool.close()
    pool.join()

    for res in result:
        r = res.get()
        for rela in r:
            for ptnId in r[rela]:
                if ptnId not in properties[rela]:
                    properties[rela][ptnId] = {"total": 0, "support": 0}
                properties[rela][ptnId]["total"] += r[rela][ptnId]["total"]
                properties[rela][ptnId]["support"] += r[rela][ptnId]["support"]

    json.dump(properties, open(outputPath, "w"))

    diff = datetime.now() - start_time
    print "Spend %d.%d seconds" % (diff.seconds, diff.microseconds)
Example 11: generateShowsAndSave
def generateShowsAndSave(self):
    f = open('../../xml/tv3shows.json', 'w')

    for show in self.getMainMenu():
        # Load and read the URL
        f2 = urllib2.urlopen(EPISODE_URL % (show['url']))
        text = f2.read()
        f2.close()

        key = show['Title']
        try:
            showkeys = self.KNOWN_TV3_SHOWS[key].keys()
            print 'Updating ' + show['Title']
            self.KNOWN_TV3_SHOWS[key]['']
        except:
            print 'Adding ' + show['Title']
            self.KNOWN_TV3_SHOWS[key] = {}

        self.KNOWN_TV3_SHOWS[key]['Title'] = show['Title']

        REGEXP = '<div id="content" style="background-image: url\((.*?)\)">'
        for mymatch in re.findall(REGEXP, text, re.MULTILINE):
            fanart = mymatch
            self.KNOWN_TV3_SHOWS[key]['Fanart_Image'] = fanart

    S.dump(self.KNOWN_TV3_SHOWS, f, indent=4)
    f.close()
Example 12: main
def main(inputPath, outputFileName):
    properties = buildProperties("../naive_model/PbR/")
    start_time = datetime.now()

    result = []
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    t = 0
    for filename in os.listdir(inputPath):
        if ".json" in filename:
            partAns = copy.deepcopy(properties)
            result.append(pool.apply_async(findAnwser, (t, filename, inputPath, partAns)))
            t += 1
    pool.close()
    pool.join()

    for res in result:
        r = res.get()
        for m in r:
            properties[m]["tp"] += r[m]["tp"]
            properties[m]["tn"] += r[m]["tn"]
            properties[m]["fp"] += r[m]["fp"]
            properties[m]["fn"] += r[m]["fn"]

    print "start write out to %s" % (outputFileName)
    json.dump(properties, open(outputFileName, "w"))

    diff = datetime.now() - start_time
    print "Spend %d.%d seconds" % (diff.seconds, diff.microseconds)
Example 13: display_matching_trips
def display_matching_trips(request, trip_id=None, lib=None):
    """Make a request to the BV server to find matching trips. Format the
    output to be read by javascript clientside code.
    """
    def to_json(trip):
        # Note: this helper iterates over the enclosing 'trips' variable,
        # not its 'trip' argument (kept as in the original source).
        return [get_trip_dict(t) for t in trips]

    trip_search_type = int(request.POST['trip_type'])

    results = lib.search_trip(trip_id=trip_id, **unicode_to_dict(request.POST))

    trip_demands = results['trip_demands']
    trip_offers = results['trip_offers']
    trip = results['trip']

    if trip_search_type == TRIP_OFFER:
        trips = trip_demands
    else:
        trips = trip_offers

    response_dict = {
        'authenticated': is_bvoauth_authenticated(request),
    }

    if not trip_id:
        response_dict['trips'] = to_json(trips)
    else:
        response_dict['trip_demands'] = to_json(trip_demands)
        response_dict['trip_offers'] = to_json(trip_offers)

    resp = HttpResponse()
    simplejson.dump(response_dict, resp, ensure_ascii=False, separators=(',', ':'))
    return resp
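dump only requires that its target expose a write() method, which is why this view can serialise straight into the Django HttpResponse rather than building a string first. A minimal sketch using an in-memory buffer as a stand-in for the response:

import io
import simplejson

buf = io.StringIO()  # any object with .write() works as the target
simplejson.dump({"trips": []}, buf, ensure_ascii=False, separators=(',', ':'))
payload = buf.getvalue()  # '{"trips":[]}'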
Example 14: savemsgstore
def savemsgstore():
    try:
        f = open('generalmessage.json', 'w')
        json.dump(generalmessagestore, f)
        f.close()
    except:
        pass
Example 15: update
def update(self):
    """
    """
    params = {'key': yeti_config.get('threattracking', 'google_api_key')}
    # , 'includeGridData': 'True'} - we don't want to do that. 200Mo file.
    base = "https://sheets.googleapis.com/v4/spreadsheets/" + yeti_config.get(
        'threattracking', 'sheet_key')
    self.api = hammock.Hammock(base, params=params)
    if False:  # switched off in the original: the cached actor.sheets.json below is used instead
        r = self.api.GET()
        if r.status_code != 200:
            raise requests.ConnectionError(
                'Return code for {query} is {code}'.format(
                    query=r.request.url, code=r.status_code))
        sheets = r.json()['sheets']
        json.dump(sheets, open("actor.sheets.json", "w"))
    else:
        sheets = json.load(open("actor.sheets.json", "r"))
    # print(pprint.pformat(sheets))
    for s_p in sheets:
        s = s_p['properties']
        title = s['title']
        if title in ['Home', '_Malware', '_Download', '_Schemes', '_Sources']:
            continue
        size = s['gridProperties']
        # print(title, size['columnCount'], size['rowCount'])
        actors_list_info = self.each_sheet_work(s)
        self.create_entities(title, actors_list_info)
    return