This article collects typical usage examples of the Python function misc.filetool.save. If you are wondering how save is used in practice, how to call it, or what real save examples look like, the hand-picked code samples below should help.
The following shows 15 code examples of the save function, sorted by popularity by default.
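All of the examples share the same call shape: filetool.save(filePath, content), with an optional encoding argument, writing a string to the given path. A minimal stand-in sketch is shown below; the real function lives in the qooxdoo generator's misc package, so the default encoding and the automatic creation of parent directories here are assumptions, not the actual implementation.

import codecs
import os

# Hedged stand-in for misc.filetool.save, mirroring how the examples call it.
def save(filePath, content="", encoding="utf-8"):
    directory = os.path.dirname(filePath)
    if directory and not os.path.isdir(directory):
        os.makedirs(directory)  # assumed: missing parent directories are created
    outfile = codecs.open(filePath, "w", encoding)
    try:
        outfile.write(content)
    finally:
        outfile.close()

# Typical call, in the same shape as the examples below.
save("./provider/data/resource/resources.json", u'{"demo": true}')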
Example 1: depsToFlareFile
def depsToFlareFile(classDepsIter, depsLogConf):
    data = {}
    for (packageId, classId, depId, loadOrRun) in classDepsIter:
        if classId not in data:
            data[classId] = {}
            data[classId]['name'] = classId
            data[classId]["size"] = 1000
            data[classId]["imports"] = []
        if loadOrRun == 'load':
            data[classId]['imports'].append(depId)

    output = []
    for cid in data.keys():
        output.append(data[cid])

    file = depsLogConf.get('flare/file', "flare.json")
    console.info("Writing dependency data to file: %s" % file)
    pretty = depsLogConf.get('flare/pretty', None)
    if pretty:
        indent = 2
        separators = (', ', ': ')
    else:
        indent = None
        separators = (',', ':')

    filetool.save(file, json.dumps(output, sort_keys=True, indent=indent, separators=separators))

    return
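For reference, the list this example writes to flare.json holds one entry per class with the keys built above; an illustrative result (class names are invented):

# Illustrative shape of the flare.json payload -- class ids are made up.
flare_output_example = [
    {"name": "qx.ui.core.Widget", "size": 1000,
     "imports": ["qx.core.Object", "qx.ui.core.LayoutItem"]},
    {"name": "qx.core.Object", "size": 1000, "imports": []},
]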
Example 2: _handleResources
def _handleResources(script, generator, filtered=True):

    def createResourceInfo(res, resval):
        resinfo = [ { "target": "resource", "data": { res : resval }} ]
        #filetool.save(approot+"/data/resource/" + res + ".json", json.dumpsCode(resinfo))
        return resinfo

    def copyResource(res, library):
        sourcepath = os.path.join(library._resourcePath, res)
        targetpath = approot + "/resource/" + res
        filetool.directory(os.path.dirname(targetpath))
        shutil.copy(sourcepath, targetpath)
        return

    # ----------------------------------------------------------------------
    context.console.info("Processing resources: ", False)
    approot = context.jobconf.get("provider/app-root", "./provider")
    filetool.directory(approot+"/data")
    filetool.directory(approot+"/resource")

    # quick copy of runLogResources, for fast results
    packages = script.packagesSorted()
    parts = script.parts
    variants = script.variants

    allresources = {}
    if filtered:
        # -- the next call is fake, just to populate package.data.resources!
        _ = generator._codeGenerator.generateResourceInfoCode(script, generator._settings, context.jobconf.get("library",[]))
        for packageId, package in enumerate(packages):
            allresources.update(package.data.resources)
    else:
        # get the main library
        mainlib = [x for x in script.libraries if x.namespace == script.namespace][0]
        reslist = mainlib.getResources()
        allresources = ResourceHandler.createResourceStruct(reslist, updateOnlyExistingSprites = False)

    # get resource info
    resinfos = {}
    numResources = len(allresources)
    for num, res in enumerate(allresources):
        context.console.progress(num+1, numResources)
        # fake a classId-like resourceId ("a.b.c"), for filter matching
        resId = os.path.splitext(res)[0]
        resId = resId.replace("/", ".")
        if filtered and not passesOutputfilter(resId):
            continue
        resinfos[res] = createResourceInfo(res, allresources[res])
        # extract library name space
        if isinstance(allresources[res], types.ListType): # it's an image = [14, 14, u'png', u'qx' [, u'qx/decoration/Modern/checkradio-combined.png', 0, 0]]
            library_ns = allresources[res][3]
        else: # html page etc. = "qx"
            library_ns = allresources[res]
        if library_ns:  # library_ns == '' means embedded image -> no copying
            library = libraries[library_ns]
            copyResource(res, library)

    filetool.save(approot+"/data/resource/resources.json", json.dumpsCode(resinfos))

    return
Example 3: migrateFile
def migrateFile(
        filePath, compiledPatches, compiledInfos,
        hasPatchModule=False, options=None, encoding="UTF-8"):

    logging.info(" - File: %s" % filePath)

    # Read in original content
    fileContent = filetool.read(filePath, encoding)
    fileId = extractFileContentId(fileContent)

    # Apply patches
    patchedContent = fileContent

    if hasPatchModule and fileId is not None:
        import patch
        tree = treegenerator.createSyntaxTree(tokenizer.parseStream(fileContent))

        # If there were any changes, compile the result
        if patch.patch(fileId, tree):
            options.prettyPrint = True  # make sure it's set
            result = [u'']
            result = pretty.prettyNode(tree, options, result)
            patchedContent = u''.join(result)

    # apply RE patches
    patchedContent = regtool(patchedContent, compiledPatches, True, filePath)
    patchedContent = regtool(patchedContent, compiledInfos, False, filePath)

    # Write file
    if patchedContent != fileContent:
        logging.info(" - %s has been modified. Storing modifications ..." % filePath)
        filetool.save(filePath, patchedContent, encoding)
Example 4: migrateFile
def migrateFile(filePath, compiledPatches, compiledInfos, patchFile, options=None, encoding="UTF-8"):

    logging.info(" - File: %s" % filePath)

    # Read in original content
    fileContent = filetool.read(filePath, encoding)
    fileId = extractFileContentId(fileContent)

    # Apply patches
    patchedContent = fileContent

    if patchFile and fileId is not None:
        # import patch
        patch = {}
        execfile(patchFile, patch)
        tree = treegenerator.createFileTree(tokenizer.Tokenizer().parseStream(fileContent))

        # If there were any changes, compile the result
        if patch["patch"](fileId, tree):
            options.prettyPrint = True  # make sure it's set
            result = [u""]
            # result = pretty.prettyNode(tree, options, result)
            result = formatter_.formatNode(tree, options, result)
            patchedContent = u"".join(result)

    # apply RE patches
    patchedContent = regtool(patchedContent, compiledPatches, True, filePath)
    patchedContent = regtool(patchedContent, compiledInfos, False, filePath)

    # Write file
    if patchedContent != fileContent:
        logging.info(" - %s has been modified. Storing modifications ..." % filePath)
        filetool.save(filePath, patchedContent, encoding)
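Because this variant loads the patch logic via execfile, the file behind patchFile is expected to define a patch(fileId, tree) function that rewrites the syntax tree in place and reports whether anything changed. A hedged stub of such a patch file (the tree-manipulation API itself is not shown here):

# Hypothetical content of a file passed as patchFile and loaded via execfile().
def patch(fileId, tree):
    """Return True if the syntax tree of fileId was modified."""
    changed = False
    # ... inspect and rewrite `tree` here ...
    return changed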
Example 5: runFix
def runFix(jobconf, classesObj):

    def fixPng():
        return

    def removeBOM(fpath):
        content = open(fpath, "rb").read()
        if content.startswith(codecs.BOM_UTF8):
            console.debug("removing BOM: %s" % filePath)
            open(fpath, "wb").write(content[len(codecs.BOM_UTF8):])
        return

    # - Main ---------------------------------------------------------------
    if not isinstance(jobconf.get("fix-files", False), types.DictType):
        return

    console = Context.console
    classes = classesObj.keys()
    fixsettings = ExtMap(jobconf.get("fix-files"))

    # Fixing JS source files
    console.info("Fixing whitespace in source files...")
    console.indent()

    console.info("Fixing files: ", False)
    numClasses = len(classes)
    eolStyle = fixsettings.get("eol-style", "LF")
    tabWidth = fixsettings.get("tab-width", 2)
    for pos, classId in enumerate(classes):
        console.progress(pos+1, numClasses)
        classEntry = classesObj[classId]
        filePath = classEntry.path
        fileEncoding = classEntry.encoding
        fileContent = filetool.read(filePath, fileEncoding)
        # Caveat: as filetool.read already calls any2Unix, converting to LF will
        # not work as the file content appears unchanged to this function
        if eolStyle == "CR":
            fixedContent = textutil.any2Mac(fileContent)
        elif eolStyle == "CRLF":
            fixedContent = textutil.any2Dos(fileContent)
        else:
            fixedContent = textutil.any2Unix(fileContent)
        fixedContent = textutil.normalizeWhiteSpace(textutil.removeTrailingSpaces(textutil.tab2Space(fixedContent, tabWidth)))
        if fixedContent != fileContent:
            console.debug("modifying file: %s" % filePath)
            filetool.save(filePath, fixedContent, fileEncoding)
        # this has to go separate, as it requires binary operation
        removeBOM(filePath)

    console.outdent()

    # Fixing PNG files -- currently just a stub!
    if fixsettings.get("fix-png", False):
        console.info("Fixing PNGs...")
        console.indent()
        fixPng()
        console.outdent()

    return
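The keys read above suggest a "fix-files" job section along these lines; this is a hedged sketch, only "eol-style", "tab-width" and "fix-png" are actually referenced by the code, and the values are illustrative defaults:

# Hypothetical "fix-files" job configuration, limited to the keys the code reads.
fix_files_config = {
    "fix-files": {
        "eol-style": "LF",   # one of "CR", "CRLF", "LF"
        "tab-width": 2,      # spaces used when expanding tabs
        "fix-png": False,    # the PNG fixer above is still a stub
    }
}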
Example 6: _handleI18N
def _handleI18N(script, generator):
    context.console.info("Processing localisation data")
    context.console.indent()
    approot = context.jobconf.get("provider/app-root", "./provider")

    # get class projection
    class_list = []
    needs_cldr = False
    for classObj in script.classesObj:
        if passesOutputfilter(classObj.id):
            class_list.append(classObj.id)
            if not needs_cldr and classObj.getHints('cldr'):
                needs_cldr = True

    # get i18n data
    context.console.info("Getting translations")
    trans_dat = generator._locale.getTranslationData(class_list, script.variants, script.locales,
                                                     addUntranslatedEntries=True)
    loc_dat = None
    if needs_cldr:
        context.console.info("Getting CLDR data")
        loc_dat = generator._locale.getLocalizationData(class_list, script.locales)

    # write translation and cldr files
    context.console.info("Writing localisation files: ", False)
    numTrans = len(trans_dat)
    for num, lang in enumerate(trans_dat):
        context.console.progress(num+1, numTrans)

        # translations
        transmap = {}
        filename = "i18n-" + lang
        targetname = "i18n-" + lang
        translations = trans_dat[lang]
        for key in translations:
            if translations[key]:
                transmap[key] = [ { "target" : targetname, "data" : { key : translations[key] }} ]
            else:
                transmap[key] = [ ]
        filetool.save(approot+"/data/translation/"+filename+".json", json.dumpsCode(transmap))

        # cldr
        localemap = {}
        filename = "locale-" + lang
        targetname = "locale-" + lang
        if loc_dat:
            # sample: { "cldr" : [ { "target" : "locale-en", "data" : {"alternativeQuotationEnd":'"', "cldr_am": "AM",...}} ]}
            localekeys = loc_dat[lang]
            cldr_entry = [ { "target" : targetname, "data" : { }} ]
            for key in localekeys:
                if localekeys[key]:
                    cldr_entry[0]['data'][key] = localekeys[key]
            localemap['cldr'] = cldr_entry
            filetool.save(approot+"/data/locale/"+filename+".json", json.dumpsCode(localemap))

    context.console.outdent()

    return
Example 7: runFontMap
def runFontMap(jobconf, confObj):

    if not jobconf.get("font-map", False):
        return

    console = Context.console
    cache = Context.cache

    # Test for fontforge
    try:
        import fontforge
    except ImportError:
        console.error("Font map generation is not possible: fontforge is missing")
        return

    console.info("Generating font map...")
    console.indent()

    done = []
    fonts = jobconf.get("font-map/fonts", {})
    for font, fontspec in fonts.iteritems():
        alias = fontspec["alias"] or font.fontfamily
        if alias in done:
            continue
        done.append(alias)
        config = {
            "alias" : alias or font.fontfamily,
            "size" : fontspec["size"] or 48,
            "mapping" : {}
        }
        fo = fontforge.open(font)
        for glyph in fo:
            go = fo[glyph]
            if go.unicode > 0:
                config["mapping"][go.glyphname] = [go.unicode, round(go.width / float(go.vwidth), 3)]

        # store meta data for this font
        bname = os.path.basename(font)
        ri = bname.rfind('.')
        if ri > -1:
            bname = bname[:ri]
        bname += '.meta'
        meta_fname = os.path.join(os.path.dirname(font), bname)
        console.debug("writing meta file %s" % meta_fname)
        filetool.save(meta_fname, json.dumps(config, ensure_ascii=False, sort_keys=True))

    console.outdent()

    return
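The .meta file written next to each font then contains the config dict built above, with mapping values of the form [unicode code point, width/vwidth ratio]; an invented sample:

# Illustrative .meta content -- glyph names and metrics are made up.
font_meta_example = {
    "alias": "FontAwesome",
    "size": 48,
    "mapping": {
        "heart": [61444, 0.875],
        "home": [61461, 0.938],
    },
}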
Example 8: writePackage
def writePackage(content, packageId=""):
    # Construct file name
    resolvedFilePath = self._resolveFileName(filePath, variants, settings, packageId)

    # Save result file
    filetool.save(resolvedFilePath, content)

    if compConf.get("paths/gzip"):
        filetool.gzip(resolvedFilePath, content)

    self._console.debug("Done: %s" % self._computeContentSize(content))
    self._console.debug("")

    return
Example 9: _handleResources
def _handleResources(script, generator):

    def createResourceInfo(res, resval):
        resinfo = [ { "target": "resource", "data": { res : resval }} ]
        #filetool.save(approot+"/data/resource/" + res + ".json", json.dumpsCode(resinfo))
        return resinfo

    def copyResource(res, library):
        sourcepath = os.path.join(library['path'], library['resource'], res)
        targetpath = approot + "/resource/" + res
        filetool.directory(os.path.dirname(targetpath))
        shutil.copy(sourcepath, targetpath)
        return

    # ----------------------------------------------------------------------
    approot = context.jobconf.get("provider/app-root", "./provider")
    filetool.directory(approot+"/data")
    filetool.directory(approot+"/resource")

    # quick copy of runLogResources, for fast results
    packages = script.packagesSortedSimple()
    parts = script.parts
    variants = script.variants

    allresources = {}
    # get resource info
    # -- the next call is fake, just to populate package.data.resources!
    _ = generator._codeGenerator.generateResourceInfoCode(script, generator._settings, context.jobconf.get("library",[]))
    for packageId, package in enumerate(packages):
        allresources.update(package.data.resources)

    resinfos = {}
    for res in allresources:
        # fake a classId-like resourceId ("a.b.c"), for filter matching
        resId = os.path.splitext(res)[0]
        resId = resId.replace("/", ".")
        if passesOutputfilter(resId):
            resinfos[res] = createResourceInfo(res, allresources[res])
            # extract library name space
            if isinstance(allresources[res], types.ListType): # it's an image = [14, 14, u'png', u'qx' [, u'qx/decoration/Modern/checkradio-combined.png', 0, 0]]
                library_ns = allresources[res][3]
            else: # html page etc. = "qx"
                library_ns = allresources[res]
            library = libraries[library_ns]
            copyResource(res, library)

    filetool.save(approot+"/data/resource/resources.json", json.dumpsCode(resinfos))

    return
Example 10: generateHttpdConfig
def generateHttpdConfig(jobconf, confObj):
    console = Context.console

    # read config
    jconf_app_namespace = jobconf.get("let/APPLICATION")
    assert jconf_app_namespace
    jconf_conf_dir = jobconf.get("web-server-config/output-dir", ".")
    jconf_conf_dir = confObj.absPath(jconf_conf_dir)
    jconf_template_dir = jobconf.get("web-server-config/template-dir")
    assert jconf_template_dir
    jconf_httpd_type = jobconf.get("web-server-config/httpd-type", "apache2")
    jconf_httpd_hosturl = jobconf.get("web-server-config/httpd-host-url", "http://localhost")

    libs = jobconf.get("library", [])
    assert libs
    for lib in libs:
        lib._init_from_manifest()

    config_path = os.path.join(jconf_conf_dir, jconf_httpd_type + ".conf")
    template_path = os.path.join(jconf_template_dir, "httpd." + jconf_httpd_type + ".tmpl.conf")
    alias_path = jconf_app_namespace.replace(".", "/")

    # collect config values
    value_map = {
        "APP_HTTPD_CONFIG" : "",
        "LOCALHOST_APP_URL" : "",
        "APP_NAMESPACE_AS_PATH" : "",
        "APP_DOCUMENT_ROOT" : "",
    }
    value_map['APP_HTTPD_CONFIG'] = config_path

    doc_root = get_doc_root(jobconf, confObj)
    value_map['APP_DOCUMENT_ROOT'] = ensure_trailing_slash(doc_root)

    app_web_path = from_doc_root_to_app_root(jobconf, confObj, doc_root)
    value_map['LOCALHOST_APP_URL'] = "/".join((jconf_httpd_hosturl, alias_path, app_web_path))
    value_map['APP_NAMESPACE_AS_PATH'] = alias_path

    # load httpd-specific template
    config_templ = filetool.read(template_path)

    # replace macros
    config_templ = string.Template(config_templ)
    config = config_templ.safe_substitute(value_map)

    # write .conf file
    console.info("Writing configuration file for '%s': '%s'" % (jconf_httpd_type, config_path))
    filetool.save(config_path, config)
    console.info("See the file's comments how to integrate it with the web server configuration")
    console.info("Then open your source application with '%s'" % value_map['LOCALHOST_APP_URL'])
Example 11: main
def main():
    apidata = {}
    apidata['type'] = 'doctree'
    apidata['children'] = []
    apidata['attributes'] = {}
    apidata['children'].append({
        "type":"packages","attributes":{},"children":[]
    })

    filetool.directory(store_path)
    dirwalker = filetool.find(module_root, r'\.py$')

    for pyfile in dirwalker:
        #if os.stat(pyfile).st_size == 0:
        #    continue
        # get the file's api doc as json
        filejson = pyapi2json(pyfile)
        apipackage = file2package(pyfile, module_root)
        # and store it
        filetool.save(store_path+'/'+apipackage+'.json', filejson)
        # make an entry in apidata struct
        levels = apipackage.split('.')
        curr = apidata['children'][0]['children']
        for pos, level in enumerate(levels):
            if level not in (x['attributes']['name'] for x in curr if 'name' in x['attributes']):
                newentry = {
                    "children" : [],
                    "type" : "packages" if pos % 2 else "package",
                    "attributes" : {
                        "packageName" : ".".join(levels[:pos]),
                        "name" : level,
                        "fullName" : ".".join(levels[:pos+1])
                    }
                }
                if pos == len(levels)-1:
                    newentry["externalRef"] = True
                    #del newentry['children']
                    #newentry["type"] = "classes"
                    pass
                curr.append(newentry)
                curr = newentry['children']
            else:
                curr = [x['children'] for x in curr if x['attributes']['name']==level][0]

    # store apidata
    filetool.save(store_path+'/'+"apidata.json", json.dumps(apidata))
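apidata.json then holds a small "doctree": the root's first child is a "packages" container, and each dot-separated segment of a module path adds a nested node whose type alternates between "package" and "packages", with the leaf marked as externalRef. An invented sample for a single module misc.filetool:

# Illustrative shape of apidata.json for one module -- values are made up.
apidata_example = {
    "type": "doctree",
    "attributes": {},
    "children": [{
        "type": "packages", "attributes": {}, "children": [{
            "type": "package",
            "attributes": {"packageName": "", "name": "misc", "fullName": "misc"},
            "children": [{
                "type": "packages",
                "attributes": {"packageName": "misc", "name": "filetool",
                               "fullName": "misc.filetool"},
                "externalRef": True,
                "children": [],
            }],
        }],
    }],
}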
Example 12: _handleI18N
def _handleI18N(script, generator):
    approot = context.jobconf.get("provider/app-root", "./provider")

    # get class projection
    class_list = []
    needs_cldr = False
    for classObj in script.classesObj:
        if passesOutputfilter(classObj.id):
            class_list.append(classObj.id)
            if not needs_cldr and classObj.getMeta('cldr'):
                needs_cldr = True

    # get i18n data
    trans_dat = generator._locale.getTranslationData_1(class_list, script.variants, script.locales,
                                                       addUntranslatedEntries=True)
    loc_dat = None
    if needs_cldr:
        loc_dat = generator._locale.getLocalizationData(class_list, script.locales)

    # write translation and cldr files
    for lang in trans_dat:
        filename = "i18n-" + lang

        # translations
        transmap = {}
        translations = trans_dat[lang]
        for key in translations:
            if translations[key]:
                transmap[key] = [ { "target" : "i18n", "data" : { key : translations[key] }} ]
            else:
                transmap[key] = [ ]
        filetool.save(approot+"/data/translation/"+filename+".json", json.dumpsCode(transmap))

        # cldr
        localemap = {}
        if loc_dat:
            localekeys = loc_dat[lang]
            for key in localekeys:
                if localekeys[key]:
                    localemap[key] = [ { "target" : "i18n", "data" : { key : localekeys[key] }} ]
                else:
                    localemap[key] = [ ]
            filetool.save(approot+"/data/locale/"+filename+".json", json.dumpsCode(localemap))

    return
Example 13: runPrettyPrinting
def runPrettyPrinting(self, classesObj):
    if not isinstance(self._job.get("pretty-print", False), types.DictType):
        return

    self._console.info("Pretty-printing code...")
    self._console.indent()
    ppsettings = ExtMap(self._job.get("pretty-print"))  # get the pretty-print config settings

    # init options
    def options(): pass
    pretty.defaultOptions(options)

    # modify according to config
    if 'general/indent-string' in ppsettings:
        options.prettypIndentString = ppsettings.get('general/indent-string')
    if 'comments/block/add' in ppsettings:
        options.prettypCommentsBlockAdd = ppsettings.get('comments/block/add')
    if 'comments/trailing/keep-column' in ppsettings:
        options.prettypCommentsTrailingKeepColumn = ppsettings.get('comments/trailing/keep-column')
    if 'comments/trailing/comment-cols' in ppsettings:
        options.prettypCommentsTrailingCommentCols = ppsettings.get('comments/trailing/comment-cols')
    if 'comments/trailing/padding' in ppsettings:
        options.prettypCommentsInlinePadding = ppsettings.get('comments/trailing/padding')
    if 'code/align-with-curlies' in ppsettings:
        options.prettypAlignBlockWithCurlies = ppsettings.get('code/align-with-curlies')
    if 'code/open-curly/newline-before' in ppsettings:
        options.prettypOpenCurlyNewlineBefore = ppsettings.get('code/open-curly/newline-before')
    if 'code/open-curly/indent-before' in ppsettings:
        options.prettypOpenCurlyIndentBefore = ppsettings.get('code/open-curly/indent-before')

    self._console.info("Pretty-printing files: ", False)
    numClasses = len(classesObj)
    for pos, classId in enumerate(classesObj):
        self._console.progress(pos+1, numClasses)
        tree = classesObj[classId].tree()
        result = [u'']
        result = pretty.prettyNode(tree, options, result)
        compiled = u''.join(result)
        filetool.save(self._classes[classId].path, compiled)

    self._console.outdent()

    return
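Taken together, the ExtMap paths queried above correspond to a nested "pretty-print" job section roughly like the following; this is a hedged sketch with illustrative values, covering only the keys the code reads:

# Hypothetical "pretty-print" job section, limited to keys referenced above.
pretty_print_config = {
    "pretty-print": {
        "general": {"indent-string": "  "},
        "comments": {
            "block": {"add": True},
            "trailing": {"keep-column": False, "comment-cols": [50, 70, 90], "padding": "  "},
        },
        "code": {
            "align-with-curlies": False,
            "open-curly": {"newline-before": False, "indent-before": False},
        },
    }
}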
Example 14: runLogResources
def runLogResources(jobconf, script):
    if not isinstance(jobconf.get("log/resources", False), types.DictType):
        return

    console = Context.console
    packages = script.packagesSorted()

    console.info("Dumping resource info...")
    console.indent()

    allresources = {}
    # get resource info
    CodeGenerator.packagesResourceInfo(script)  # populate package.data.resources
    for packageId, package in enumerate(packages):
        allresources.update(package.data.resources)

    file_ = jobconf.get("log/resources/file", "resources.json")
    filetool.save(file_, json.dumpsCode(allresources))

    console.outdent()

    return
Example 15: depsToJsonFile
def depsToJsonFile(classDepsIter, depsLogConf):
    data = {}
    for (packageId, classId, depId, loadOrRun) in classDepsIter:
        if classId not in data:
            data[classId] = {}
            data[classId]["load"] = []
            data[classId]["run"] = []
        data[classId][loadOrRun].append(depId)

    file = depsLogConf.get('json/file', "deps.json")
    console.info("Writing dependency data to file: %s" % file)
    pretty = depsLogConf.get('json/pretty', None)
    if pretty:
        indent = 2
        separators = (', ', ': ')
    else:
        indent = None
        separators = (',', ':')

    filetool.save(file, json.dumps(data, sort_keys=True, indent=indent, separators=separators))

    return
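The resulting deps.json maps each class id to its load-time and run-time dependencies, in exactly the shape built above; an illustrative excerpt (class names invented):

# Illustrative shape of deps.json -- class ids are made up.
deps_output_example = {
    "qx.ui.core.Widget": {
        "load": ["qx.core.Object", "qx.ui.core.LayoutItem"],
        "run": ["qx.ui.core.queue.Appearance"],
    },
    "qx.core.Object": {"load": [], "run": []},
}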