

Python yaml.dump_all Function Code Examples

This article collects and summarizes typical usage examples of the yaml.dump_all function in Python. If you are wondering how to call dump_all, how it behaves, or what real-world code that uses it looks like, the hand-picked examples below should help.


A total of 15 code examples of the dump_all function are shown below, sorted by popularity by default.
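Before diving into the examples, here is a minimal orientation sketch (not taken from any of the projects below; the data is made up): yaml.dump_all serializes an iterable of Python objects into a multi-document YAML stream, one document per object, and yaml.safe_load_all reads such a stream back.

import io

import yaml

documents = [
    {"name": "first", "value": 1},
    {"name": "second", "value": 2},
]

buff = io.StringIO()
# Each item in the iterable becomes its own YAML document, separated by '---'.
yaml.dump_all(documents, buff, explicit_start=True, default_flow_style=False)
print(buff.getvalue())

# Round trip: safe_load_all yields one Python object per document.
for doc in yaml.safe_load_all(buff.getvalue()):
    print(doc)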

Example 1: rows

        def rows(self):
            opt = self.options
            with _open_archive(opt, mode = 'w') as _archive:
                _archive.delete(opt['dataset'] + '\\datadict.txt')
                _archive.delete(opt['dataset'] + '\\data.txt')
                _archive.delete(opt['dataset'] + '\\info.txt')

                with _write_utf8(_archive, opt['dataset'], 'datadict.txt') as out:
                    yaml.dump_all(opt['_variables'].filter(), out, default_flow_style = False, encoding = None)

                datadict_hash = out.digest.hexdigest()
                try:
                    with _write_utf8(_archive, opt['dataset'], 'data.txt') as out:
                        out.write('\t'.join(v.name for v in opt['_variables'].filter()) + '\n')
                        
                        while True:
                            row = (yield)
                            out.write('\t'.join(save_var(v, value) for v, value in opt['_variables'].pair_filter(row)) + '\n')                   
                except (GeneratorExit, StopIteration):
                    pass

                data_hash = out.digest.hexdigest()
                with _write_utf8(_archive, opt['dataset'], 'info.txt') as out:
                    yaml.dump({
                        'cases' : self.saved_rows,
                        'api_version' : API_VERSION,
                        'file_version' : opt['version'],
                        'data_hash' : data_hash,
                        'dict_hash' : datadict_hash,
                    }, out, encoding = None, default_flow_style = False)
Developer: jugovich, Project: bmanemail, Lines: 30, Source: csharp.py

Example 2: test_unsafe

    def test_unsafe(self):
        dummy = Dummy()

        with self.assertRaises(yaml.representer.RepresenterError):
            yaml.dump_all([dummy])

        with self.assertRaises(yaml.representer.RepresenterError):
            yaml.dump(dummy, Dumper=yDumper)

        # reverse monkey patch and try again
        monkey_patch_pyyaml_reverse()

        with tempfile.TemporaryFile(suffix='.yaml') as f:
            yaml.dump_all([dummy], stream=f)
            f.seek(0)  # rewind

            doc_unsafe = yaml.load(f)
            self.assertTrue(type(doc_unsafe) is Dummy)

            monkey_patch_pyyaml()
            with self.assertRaises(yaml.constructor.ConstructorError):
                f.seek(0)  # rewind
                safe_yaml_load(f)

            with self.assertRaises(yaml.constructor.ConstructorError):
                f.seek(0)  # rewind
                yaml.load(f)
Developer: DataDog, Project: dd-agent, Lines: 27, Source: test_utils_yaml.py
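For context on what this test monkey-patches around, here is a minimal standalone sketch of PyYAML's default safe-dump/safe-load behaviour (the Dummy class below is just a stand-in for the test's own fixture):

import yaml


class Dummy:
    """Stand-in for an arbitrary, unregistered Python object."""


# SafeDumper has no representer for custom classes, so dumping refuses.
try:
    yaml.dump_all([Dummy()], Dumper=yaml.SafeDumper)
except yaml.representer.RepresenterError as exc:
    print("refused to dump:", exc)

# Likewise, the safe loader refuses Python-specific tags when loading.
try:
    yaml.safe_load("!!python/object:__main__.Dummy {}")
except yaml.constructor.ConstructorError as exc:
    print("refused to load:", exc)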

Example 3: dump

    def dump(self, path):
        # create path as directory
        import os
        os.makedirs(path, exist_ok=True)
        for subdir in [self.move_dir, self.predicate_dir]:
            os.makedirs('{!s}/{!s}'.format(path, subdir), exist_ok=True)
        # gather predicates and moves and set in directories
        predicates = []
        moves      = []
        algorithms = []
        for entity in self.entities:
            name = entity.__class__.__name__
            if   name ==      Move.__name__:      moves.append(entity)
            elif name == Predicate.__name__: predicates.append(entity)
            elif name == Algorithm.__name__: algorithms.append(entity)
            else: raise Exception('Encountered an invalid object: {!r}'.format(name))

        for p in predicates:
            with open('/'.join([path, self.predicate_dir, p.filename]), 'w') as f:
                f.writelines(p._definition)

        for p in moves:
            with open('/'.join([path, self.move_dir, p.filename]), 'w') as f:
                f.writelines(p._definition)

        yaml.dump_all(self.sorted(),
                      open('{}/{}'.format(path, self.description_document), 'w'),
                      explicit_start=True)
Developer: franzlarsson, Project: smppaper, Lines: 28, Source: test.py

Example 4: format_info

def format_info(value, format, cols_width=None, dumper=None):
    if format in (INFO_FORMAT.DICT, INFO_FORMAT.JSON, INFO_FORMAT.YAML):
        value['component_details'] = json_loads(value['component_details'])

    if format == INFO_FORMAT.JSON:
        return json_dumps(value)

    elif format == INFO_FORMAT.YAML:
        buff = StringIO()
        yaml.dump_all([value], default_flow_style=False, indent=4, Dumper=dumper, stream=buff)
        value = buff.getvalue()
        buff.close()

        return value

    elif format == INFO_FORMAT.TEXT:
        cols_width = (elem.strip() for elem in cols_width.split(','))
        cols_width = [int(elem) for elem in cols_width]

        table = Texttable()
        table.set_cols_width(cols_width)

        # Use text ('t') instead of auto so that boolean values don't get converted into ints
        table.set_cols_dtype(['t', 't'])

        rows = [['Key', 'Value']]
        rows.extend(sorted(value.items()))

        table.add_rows(rows)

        return table.draw()

    else:
        return value
Developer: Aayush-Kasurde, Project: zato, Lines: 34, Source: component_info.py

Example 5: save

    def save(self, filename, silent=True):
        """Save this configuration as a YAML file. YAML files generally have
        the ``.yaml`` or ``.yml`` extension. If the filename ends in
        ``.dat``, the configuration will be saved as a raw dictionary literal.

        :param string filename: The filename on which to save the configuration.
        :param bool silent: Unused.

        """
        if hasattr(filename, 'read') and hasattr(filename, 'readlines'):
            filename.write("# %s: <stream>" % self.name)
            yaml.dump_all(self._save_yaml_callback() + [self.store],
                          filename, default_flow_style=False, encoding='utf-8', Dumper=PyshellDumper)
        else:
            with open(filename, "w") as stream:
                stream.write("# %s: %s\n" % (self.name, filename))
                if re.search(r"(\.yaml|\.yml)$", filename):
                    yaml.dump_all(
                        self._save_yaml_callback() + [self.store], stream,
                        default_flow_style=False, encoding='utf-8', Dumper=PyshellDumper)
                elif re.search(r"\.dat$", filename):
                    for document in self._save_yaml_callback():
                        stream.write(str(document))
                        stream.write("\n---\n")
                    stream.write(str(self.store))
                elif not silent:
                    raise ValueError("Filename Error, not "
                                     "(.dat,.yaml,.yml): %s" % filename)
            self._filename = filename
Developer: alexrudy, Project: pyshell, Lines: 29, Source: core.py

Example 6: dump_example

def dump_example( yml_name ):
  """
  Example demonstrating how to load several pieces of data into a YAML file.
  """

  # Create some slush data to put in the YAML file.

  # A single basic type.
  foo = "bar"

  # A built-in aggregated type.
  mammals = {}
  mammals["cat"]     = "Frisky"
  mammals["camel"]   = "Humpy"
  mammals["dolphin"] = "Flipper"

  # A single user-defined type.
  dumb = dummy.Dummy("First dummy!")

  # An aggregation of user-defined types. (uses list comprehension)
  dum_dums = [dummy.Dummy("Dum Dum %s" % x) for x in range(0,5)]

  # Open the YAML file for writing.
  yml = open( yml_name, 'w' )

  # Use the dump_all() method to write to the YAML file. The dump_all()
  # method takes a list or generator and will write all data to the
  # YAML file.
  data = [foo, mammals, dumb, dum_dums]
  yaml.dump_all(data, yml)
  yml.close()
Developer: civissmith, Project: python_examples, Lines: 31, Source: yaml_cereal.py
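As an illustrative follow-up (not part of the original script; the file name example.yml is an assumption), the multi-document file written by dump_example can be read back with yaml.load_all. Because the stream contains user-defined Dummy instances, it needs the full, non-safe loader:

import yaml

# dump_all() wrote four documents: the string, the mammal dict, a single
# Dummy, and the list of Dummies; load_all() yields them back in order.
# yaml.Loader can instantiate arbitrary Python objects (the dummy module
# must be importable), so only use it on trusted files.
with open("example.yml") as yml:
    for document in yaml.load_all(yml, Loader=yaml.Loader):
        print(type(document).__name__, document)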

Example 7: write

    def write(self, correlation_iterable, stream):
        """
        :type correlation_iterable: Iterable
"""
        yaml.add_representer(Correlation, CorrelationSerializerYaml._correlation_yaml_representer)

        yaml.dump_all(correlation_iterable, stream = stream, default_flow_style = False)
Developer: 4sp1r3, Project: modsecurity-exception-factory, Lines: 7, Source: correlation_serializer_yaml.py

Example 8: main

def main(datasets):
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)

    for dataset in datasets:
        path = Path(dataset)

        if path.is_dir():
            paths = list(path.glob('*.xml'))
        elif path.suffix != '.xml':
            raise RuntimeError('want xml')
        else:
            paths = [path]

        documents = []
        for path in paths:
            logging.info("Processing %s...", path)
            try:
                documents += prepare_dataset(path)
            except Exception as e:
                logging.info("Failed: %s", e)

        if documents:
            yaml_path = str(path.parent.joinpath('agdc-metadata.yaml'))
            logging.info("Writing %s dataset(s) into %s", len(documents), yaml_path)
            with open(yaml_path, 'w') as stream:
                yaml.dump_all(documents, stream)
        else:
            logging.info("No datasets discovered. Bye!")
Developer: ceos-seo, Project: Data_Cube_v2, Lines: 28, Source: modisprepare.py

Example 9: create_season_config

def create_season_config(config, db, output_file):
	info("Checking for new shows")
	shows = _get_primary_source_shows(config)
	
	debug("Outputting new shows")
	with open(output_file, "w", encoding="utf-8") as f:
		yaml.dump_all(shows, f, explicit_start=True, default_flow_style=False)
Developer: flipstables, Project: holo, Lines: 7, Source: module_find_shows.py

Example 10: dump_tree

def dump_tree(tree, fd, ctx):
    """
    Dump a tree of objects, possibly containing custom types, to YAML.

    Parameters
    ----------
    tree : object
        Tree of objects, possibly containing custom data types.

    fd : pyasdf.generic_io.GenericFile
        A file object to dump the serialized YAML to.

    ctx : Context
        The writing context.
    """
    if not isinstance(ctx, Context):
        ctx = Context(ctx)

    class AsdfDumperTmp(AsdfDumper):
        pass
    AsdfDumperTmp.ctx = ctx

    tag = tree.yaml_tag
    tag = tag[:tag.index('/core/asdf') + 1]
    tree = custom_tree_to_tagged_tree(tree, ctx)
    validate_tagged_tree(tree, ctx)

    yaml.dump_all(
        [tree], stream=fd, Dumper=AsdfDumperTmp,
        explicit_start=True, explicit_end=True,
        version=ctx.versionspec.yaml_version,
        allow_unicode=True,
        encoding='utf-8',
        tags={'!': tag})
Developer: ejeschke, Project: pyasdf, Lines: 34, Source: yamlutil.py
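To see what the explicit_start/explicit_end and encoding options used above produce, here is a small self-contained sketch (the data is made up; the ASDF-specific Dumper, tags, and validation are omitted):

import yaml

doc = {"telescope": "demo", "exposure_s": 30}

# With encoding set and no stream given, dump_all returns UTF-8 bytes,
# and each document is framed by explicit '---' and '...' markers.
blob = yaml.dump_all(
    [doc],
    explicit_start=True,
    explicit_end=True,
    allow_unicode=True,
    encoding='utf-8',
)
print(blob.decode('utf-8'))
# ---
# exposure_s: 30
# telescope: demo
# ...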

Example 11: save_to_file

    def save_to_file(self, file_path):
        """
        Saves the replay data to a YAML file.
        """

        with open(file_path, "w") as f:
            yaml.dump_all(self.entries, f, explicit_start=True)
Developer: Lyrositor, Project: ATBP-Tools, Lines: 7, Source: replay.py

Example 12: merge

    def merge(self, merged_output):
        """
        # Concatenate the two input files together along with a generated
        #  audit header. Dump the result in yaml and xml formats
        """

        # Create an audit header
        a = audit_header.AuditHeader()
        a.set_fields('tabulator_aggregation',
                     'Pito Salas', 'TTV Tabulator TAB02', 
                     'TTV Tabulator 0.1 JUL-1-2008', self.new_prov)

        # Dump merge into a file in yaml format
        with open(''.join([merged_output,'.yml']), 'w') as stream:
            stream.write(a.serialize_yaml())
            yaml.dump_all(self.b1, stream)
            stream.write('---\n')
            yaml.dump_all(self.b2, stream)

        # Dump merge into a file in xml format        
        with open(''.join([merged_output,'.xml']), 'w') as stream:
            stream.write(a.serialize_xml())
            for file in (self.b1, self.b2):
                for record in file:
                    stream.writelines(xml_serialize(record, 0))
Developer: trustthevote, Project: Tabulator-v1, Lines: 25, Source: merger.py

Example 13: export

def export(options):
    log_entries = []

    cn = MySQLdb.connect(host=options.host, user=options.username, 
            passwd=options.password, db=options.db, use_unicode=True)

    cur = cn.cursor()
    cur.execute('SELECT id, post_title, post_date, guid FROM wp_posts '
            'INNER JOIN wp_term_relationships ON '
            'wp_term_relationships.object_id = wp_posts.id '
            'WHERE post_status = %s AND term_taxonomy_id = %s '
            'ORDER BY post_date ASC', ('publish', 14))
    for row in cur.fetchall():
        id, title, date, guid = row
        entry = {'Title': title, 'Date': date, 'GUID': guid}
        subcur = cn.cursor()
        subcur.execute('SELECT meta_key, meta_value FROM wp_postmeta '
                'WHERE post_id = %s', (id,))
        for key, value in subcur.fetchall():
            if key == '_readinglog_url': entry['URL'] = value
            elif key == '_readinglog_author': entry['Author'] = value
            elif key == '_readinglog_rating': entry['Rating'] = float(value)
        log_entries.append(entry)

    if options.output is not None:
        f = open(options.output, 'w')
    else:
        f = sys.stdout
    
    yaml.add_representer(unicode, lambda dumper, value: 
            dumper.represent_scalar(u'tag:yaml.org,2002:str', value))
    yaml.dump_all(log_entries, f, default_flow_style=False, allow_unicode=True)
Developer: danc86, Project: constance, Lines: 32, Source: export_readinglog_wp.py

Example 14: save

def save(config_list, file_name):
    res = [cfg._cfg for cfg in config_list]

    with file(file_name, 'w') as f:
        if len(res) > 1:
            yaml.dump_all(res, f)
        else:
            yaml.dump(res, f)
Developer: ContinuumIO, Project: hexrd, Lines: 8, Source: __init__.py

Example 15: dumps

def dumps(obj):
    """Dump a python object -> blob and apply our pretty styling."""
    buff = six.BytesIO()
    yaml.dump_all([obj], buff,
                  explicit_start=True, indent=2,
                  default_flow_style=False,
                  line_break="\n", Dumper=PrettySafeDumper,
                  allow_unicode=True)
    return buff.getvalue()
Developer: openstack, Project: releases, Lines: 9, Source: yamlutils.py


Note: The yaml.dump_all examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.