本文整理汇总了Python中Cerebrum.utils.atomicfile.SimilarSizeWriter.max_pct_change方法的典型用法代码示例。如果您正苦于以下问题:Python SimilarSizeWriter.max_pct_change方法的具体用法?Python SimilarSizeWriter.max_pct_change怎么用?Python SimilarSizeWriter.max_pct_change使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Cerebrum.utils.atomicfile.SimilarSizeWriter
的用法示例。
在下文中一共展示了SimilarSizeWriter.max_pct_change方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: write_edu_info
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_edu_info(self, edu_info_file):
    """Write an XML file with information about every student's
    'activities' registered in FS.

    Specifically, all participations in the following are listed:
    - undenh
    - undakt
    - kullklasser
    - kull
    """
    logger.info("Writing edu info for all students")
    out = SimilarSizeWriter(edu_info_file, mode='w',
                            encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    # (XML tag, FS query) pairs, emitted in this order.
    sources = (
        ("undenh", self.fs.undervisning.list_studenter_alle_undenh),
        ("undakt", self.fs.undervisning.list_studenter_alle_undakt),
        ("kullklasse", self.fs.undervisning.list_studenter_alle_kullklasser),
        ("kull", self.fs.undervisning.list_studenter_alle_kull),
    )
    for tag, fetch in sources:
        logger.debug("Processing %s entries", tag)
        for entry in fetch():
            out.write(xml.xmlify_dbrow(entry, entry.keys(), tag) + "\n")
    out.write("</data>\n")
    out.close()
示例2: generate_filegroup
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def generate_filegroup(self, filename):
    """Write the filegroup map to *filename*.

    One line per exported group: the group name, a ``:*:<gid>:`` field
    and the comma-separated member user names.  Groups with illegal or
    over-long (> 8 chars) names, and groups without a GID, are skipped
    with a warning.
    """
    logger.debug("generate_group: %s", filename)
    f = SimilarSizeWriter(filename, "w", encoding='UTF-8')
    # The file is regenerated wholesale; tolerate only small size drift.
    f.max_pct_change = 5
    # sorted() iterates the dict's keys in order on both Python 2 and 3;
    # the previous ``keys()`` + ``.sort()`` only works on Python 2,
    # where dict.keys() returns a list (dict_keys has no .sort()).
    for group_id in sorted(self._exported_groups):
        group_name = self._exported_groups[group_id]
        tmp = posix_group.illegal_name(group_name)
        if tmp or len(group_name) > 8:
            logger.warn("Bad groupname %s %s" % (group_name, tmp))
            continue
        try:
            group_members, user_members = self._expand_group(group_id)
        except Errors.NotFoundError:
            logger.warn("Group %s has no GID", group_id)
            continue
        tmp_users = self._filter_illegal_usernames(user_members,
                                                   group_name)
        logger.debug("%s -> g=%s, u=%s" % (
            group_id, group_members, tmp_users))
        f.write(self._wrap_line(group_name, ",".join(tmp_users),
                                ':*:%i:' % self._group.posix_gid))
    # e_o_f is a module-level flag; some downstream consumers require an
    # explicit end-of-file marker line.
    if e_o_f:
        f.write('E_O_F\n')
    f.close()
示例3: write_kull_info
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_kull_info(self, kull_info_file):
    """Write an XML file with information about every student's cohort
    ('kull') participation registered in FS.

    Specifically, all participations in the following are listed:
    - kullklasser
    - kull
    """
    logger.info("Writing kull info to '%s'", kull_info_file)
    out = SimilarSizeWriter(kull_info_file, mode='w',
                            encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    # (XML tag, FS query) pairs, emitted in this order.
    sources = (
        ("kullklasse", self.fs.undervisning.list_studenter_alle_kullklasser),
        ("kulldeltaker", self.fs.undervisning.list_studenter_alle_kull),
        ("kull", self.fs.info.list_kull),
    )
    for tag, fetch in sources:
        for entry in fetch():
            out.write(xml.xmlify_dbrow(entry, entry.keys(), tag) + "\n")
    out.write("</data>\n")
    out.close()
示例4: ldif_outfile
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def ldif_outfile(tree, filename=None, default=None, explicit_default=False,
                 max_change=None, module=cereconf):
    """(Open and) return LDIF outfile for <tree>.
    Use <filename> if specified,
    otherwise module.LDAP_<tree>['file'] unless <explicit_default>,
    otherwise return <default> (an open filehandle) if that is not None.
    (explicit_default should be set if <default> was opened from a
    <filename> argument and not from module.LDAP*['file'].)
    When opening a file, use SimilarSizeWriter where close() fails if
    the resulting file has changed more than <max_change>, or
    module.LDAP_<tree>['max_change'], or module.LDAP['max_change'].
    If max_change is unset or >= 100, just open the file normally.
    """
    # Fall back to the configured filename only when the caller gave
    # neither an explicit filename nor an explicitly opened default.
    if not (filename or explicit_default):
        filename = getattr(module, 'LDAP_' + tree).get('file')
        if filename:
            filename = os.path.join(module.LDAP['dump_dir'], filename)
    if filename:
        if max_change is None:
            # Precedence: LDAP_<tree>['max_change'], then
            # LDAP['max_change'], then 100 (which disables the check).
            max_change = ldapconf(tree, 'max_change', default=ldapconf(
                None, 'max_change', default=100, module=module),
                module=module)
        if max_change < 100:
            # Guard against truncated dumps: close() will fail if the
            # resulting file size drifted more than max_change percent.
            f = SimilarSizeWriter(filename, 'w')
            f.max_pct_change = max_change
        else:
            # No size sanity check requested; plain atomic write.
            f = AtomicFileWriter(filename, 'w')
        return f
    if default:
        return default
    raise _Errors.CerebrumError(
        'Outfile not specified and LDAP_{0}["file"] not set'.format(tree))
示例5: write_undenh_student
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_undenh_student(self, undenh_student_file):
    """Write an overview of persons enrolled in teaching units.

    Includes data for all teaching units in the current and the next
    semester.
    """
    logger.info("Writing undenh_student info to '%s'",
                undenh_student_file)
    out = SimilarSizeWriter(undenh_student_file, mode='w',
                            encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    # Key columns identifying a single teaching unit.
    unit_keys = ('institusjonsnr', 'emnekode', 'versjonskode',
                 'terminnr', 'terminkode', 'arstall')
    for semester in ('current', 'next'):
        cols, units = self._ext_cols(
            self.fs.undervisning.list_undervisningenheter(sem=semester))
        for unit in units:
            unit_attr = {key: unit[key] for key in unit_keys}
            student_cols, enrolled = self._ext_cols(
                self.fs.undervisning.list_studenter_underv_enhet(**unit_attr))
            for st in enrolled:
                # Each <student> element carries the unit key plus the
                # person's national id as attributes.
                attrs = unit_attr.copy()
                for key in ('fodselsdato', 'personnr'):
                    attrs[key] = st[key]
                out.write(xml.xmlify_dbrow({}, (), 'student',
                                           extra_attr=attrs) + "\n")
    out.write("</data>\n")
    out.close()
示例6: main
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def main():
    """Parse the command line and write the generated list to a file.

    Options:
        -o FILE / --output=FILE   output filename (required)

    Exits with a usage message when no output file is given (previously
    ``None`` was passed straight to SimilarSizeWriter, crashing with an
    obscure error).
    """
    opts, rest = getopt.getopt(sys.argv[1:], "o:", ("output=",))
    filename = None
    for option, value in opts:
        if option in ("-o", "--output"):
            filename = value
    if filename is None:
        # sys.exit with a string prints it to stderr and exits non-zero.
        sys.exit("Usage: %s -o <output file>" % sys.argv[0])
    f = SimilarSizeWriter(filename, "w")
    f.max_pct_change = 50
    output_file(sort_list(generate_list()), f)
    f.close()
示例7: write_ou_info
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_ou_info(self, institution_number, ou_file):
    """Write an XML file with information about all OUs (org. units)."""
    logger.info("Writing OU info to '%s'", ou_file)
    out = SimilarSizeWriter(ou_file, mode='w', encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    # (FS column, XML attribute) pairs; note that 'stedakronym' is
    # deliberately emitted twice, under two attribute names.
    attr_map = (
        ('faknr', 'fakultetnr'),
        ('instituttnr', 'instituttnr'),
        ('gruppenr', 'gruppenr'),
        ('stedakronym', 'akronym'),
        ('stedakronym', 'forkstednavn'),
        ('stednavn_bokmal', 'stednavn'),
        ('stedkode_konv', 'stedkode_konv'),
        ('faknr_org_under', 'fakultetnr_for_org_sted'),
        ('instituttnr_org_under', 'instituttnr_for_org_sted'),
        ('gruppenr_org_under', 'gruppenr_for_org_sted'),
        ('adrlin1', 'adresselinje1_intern_adr'),
        ('adrlin2', 'adresselinje2_intern_adr'),
        ('postnr', 'poststednr_intern_adr'),
        ('adrlin1_besok', 'adresselinje1_besok_adr'),
        ('adrlin2_besok', 'adresselinje2_besok_adr'),
        ('postnr_besok', 'poststednr_besok_adr'),
    )
    # (FS column, communication type code) pairs for <komm> elements.
    comm_map = (
        ('telefonnr', 'EKSTRA TLF'),
        ('faxnr', 'FAX'),
        ('emailadresse', 'EMAIL'),
        ('url', 'URL'),
    )
    cols, units = self._ext_cols(
        self.fs.info.list_ou(institution_number))
    for unit in units:
        attrs = {}
        for fs_col, xml_attr in attr_map:
            if unit[fs_col] is not None:
                attrs[xml_attr] = xml.escape_xml_attr(unit[fs_col])
        contact = [
            {'kommtypekode': xml.escape_xml_attr(typekode),
             'kommnrverdi': xml.escape_xml_attr(unit[fs_col])}
            for fs_col, typekode in comm_map
            if unit[fs_col]]  # skip NULLs and empty strings
        # TODO: The 'url' and 'bibsysbeststedkode' columns are fetched
        # from FS but not included in the output here.
        out.write('<sted ' +
                  ' '.join("%s=%s" % pair for pair in attrs.items()) +
                  '>\n')
        for entry in contact:
            out.write('<komm ' +
                      ' '.join("%s=%s" % pair for pair in entry.items()) +
                      ' />\n')
        out.write('</sted>\n')
    out.write("</data>\n")
    out.close()
示例8: write_emne_info
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_emne_info(self, emne_info_file):
    """Write an XML file with information about every defined course
    ('emne')."""
    logger.info("Writing emne info to '%s'", emne_info_file)
    out = SimilarSizeWriter(emne_info_file, mode='w',
                            encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = self._ext_cols(self.fs.info.list_emner())
    for row in rows:
        out.write(
            xml.xmlify_dbrow(row, xml.conv_colnames(cols), 'emne') + "\n")
    out.write("</data>\n")
    out.close()
示例9: write_role_info
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_role_info(self, role_file):
    """Write an XML file with every role defined in FS.PERSONROLLE."""
    logger.info("Writing role info to '%s'", role_file)
    out = SimilarSizeWriter(role_file, mode='w', encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = self._ext_cols(
        self.fs.undervisning.list_alle_personroller())
    for row in rows:
        out.write(
            xml.xmlify_dbrow(row, xml.conv_colnames(cols), 'rolle') + "\n")
    out.write("</data>\n")
    out.close()
示例10: write_netpubl_info
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_netpubl_info(self, netpubl_file):
    """Write an XML file with net-publication consent status."""
    logger.info("Writing nettpubl info to '%s'", netpubl_file)
    out = SimilarSizeWriter(netpubl_file, mode='w',
                            encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, rows = self._ext_cols(self.fs.person.list_status_nettpubl())
    for row in rows:
        out.write(xml.xmlify_dbrow(row,
                                   xml.conv_colnames(cols),
                                   'nettpubl') + "\n")
    out.write("</data>\n")
    out.close()
示例11: write_evukurs_info
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_evukurs_info(self, evu_kursinfo_file):
    """Write data about every EVU course.

    Needed, among other things, to build the EVU part of CF.
    """
    logger.info("Writing evukurs info to '%s'", evu_kursinfo_file)
    out = SimilarSizeWriter(evu_kursinfo_file, mode='w',
                            encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    cols, courses = self._ext_cols(self.fs.evu.list_kurs())
    for course in courses:
        out.write(
            xml.xmlify_dbrow(
                course, xml.conv_colnames(cols), "evukurs") + "\n")
    out.write("</data>\n")
    out.close()
示例12: write_misc_info
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_misc_info(self, misc_file, tag, func_name):
    """Write an XML file with data from the given access_FS function.

    :param misc_file: output filename
    :param tag: XML tag to wrap each row in
    :param func_name: dotted attribute path to an FS query method,
        resolved relative to ``self.fs`` (e.g. ``"info.list_emner"``)
    """
    logger.info("Writing misc info to '%s'", misc_file)
    f = SimilarSizeWriter(misc_file, mode='w', encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    f.max_pct_change = 50
    f.write(xml.xml_hdr + "<data>\n")
    # Resolve the dotted attribute path with a plain loop.  This works
    # on both Python 2 and 3, unlike the builtin reduce() the original
    # relied on (moved to functools in Python 3), and is clearer.
    func = self.fs
    for attr in func_name.split('.'):
        func = getattr(func, attr)
    cols, dta = self._ext_cols(func())
    for t in dta:
        self.fix_float(t)
        f.write(xml.xmlify_dbrow(t, xml.conv_colnames(cols), tag) + "\n")
    f.write("</data>\n")
    f.close()
示例13: write_undenh_metainfo
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_undenh_metainfo(self, undervenh_file):
    """Write metadata about teaching units for the current and the next
    semester."""
    logger.info("Writing undenh_meta info to '%s'", undervenh_file)
    out = SimilarSizeWriter(undervenh_file, mode='w',
                            encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<undervenhet>\n")
    for term in ('current', 'next'):
        cols, units = self._ext_cols(
            self.fs.undervisning.list_undervisningenheter(sem=term))
        for unit in units:
            out.write(
                xml.xmlify_dbrow(unit, xml.conv_colnames(cols), 'undenhet')
                + "\n")
    out.write("</undervenhet>\n")
    out.close()
示例14: write_person_info
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def write_person_info(self, person_file):
    """Write an XML file with information about every person registered
    in FS that we may also want in Cerebrum.

    A person can occur several times in the file.
    """
    # TBD: Should we cache all data, so that we could instead produce a
    # file where all information about one person is gathered under a
    # single <person> tag?
    logger.info("Writing person info to '%s'", person_file)
    out = SimilarSizeWriter(person_file, mode='w',
                            encoding=XML_ENCODING)
    # Refuse to replace the old dump if the size changed by more than 50%.
    out.max_pct_change = 50
    out.write(xml.xml_hdr + "<data>\n")
    # (FS query, XML tag, normalise floats first?) -- emitted in order.
    sections = (
        # Active students.
        (self.fs.student.list_aktiv, 'aktiv', True),
        # Exam registrations.
        (self.fs.student.list_eksamensmeldinger, 'eksamen', False),
        # EVU students; many of these also appear in the queries above.
        (self.fs.evu.list, 'evu', False),
        # Active faculty ("fagpersoner").
        (self.fs.undervisning.list_fagperson_semester, 'fagperson', False),
    )
    for query, tag, needs_float_fix in sections:
        cols, rows = self._ext_cols(query())
        for row in rows:
            if needs_float_fix:
                self.fix_float(row)
            out.write(
                xml.xmlify_dbrow(row, xml.conv_colnames(cols), tag) + "\n")
    out.write("</data>\n")
    out.close()
示例15: list_quotas
# 需要导入模块: from Cerebrum.utils.atomicfile import SimilarSizeWriter [as 别名]
# 或者: from Cerebrum.utils.atomicfile.SimilarSizeWriter import max_pct_change [as 别名]
def list_quotas(fname, hostname, diskname, spread):
    """Dump disk quotas to *fname*.

    Scope is chosen by the first non-empty selector: a single disk
    (*diskname*), every disk on one host (*hostname*), or every disk
    carrying the disk-quota trait.
    """
    out = SimilarSizeWriter(fname, "w")
    # Refuse to replace the old dump if the size changed by more than 10%.
    out.max_pct_change = 10
    disk = Factory.get("Disk")(db)
    if diskname:
        # One specific disk.
        disk.find_by_path(diskname)
        list_disk_quotas(out, disk.entity_id, spread)
    elif hostname:
        # Every disk attached to the given host.
        host = Factory.get("Host")(db)
        host.find_by_name(hostname)
        for entry in disk.list(host_id=host.entity_id, spread=spread):
            list_disk_quotas(out, entry['disk_id'], spread)
    else:
        # Fall back to every disk with a quota trait.
        for entry in disk.list_traits(co.trait_disk_quota):
            list_disk_quotas(out, entry['entity_id'], spread)
    out.close()