本文整理汇总了Python中cvs2svn_lib.key_generator.KeyGenerator类的典型用法代码示例。如果您正苦于以下问题:Python KeyGenerator类的具体用法?Python KeyGenerator怎么用?Python KeyGenerator使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了KeyGenerator类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: start
def start(self):
    """Spawn generate_blobs.py as a child process, with a pipe to its stdin."""
    # Marks generated here label the blobs in the git-fast-import stream.
    self._mark_generator = KeyGenerator()
    logger.normal("Starting generate_blobs.py...")
    script_path = os.path.join(os.path.dirname(__file__), "generate_blobs.py")
    self._popen = subprocess.Popen(
        [sys.executable, script_path, self.blob_filename],
        stdin=subprocess.PIPE,
    )
示例2: __init__
def __init__(self):
    """Initialize revision numbering and project-initialization state."""
    # The revision number to assign to the next new SVNCommit.
    self.revnum_generator = KeyGenerator()
    # A set containing the Projects that have already been
    # initialized:
    self._initialized_projects = set()
示例3: start
def start(self):
    """Start the underlying revision reader and open the blob output file."""
    self.revision_reader.start()
    # Fall back to a temp file managed by the artifact manager when no
    # explicit blob filename was configured.
    if self.blob_filename is not None:
        path = self.blob_filename
    else:
        path = artifact_manager.get_temp_file(config.GIT_BLOB_DATAFILE)
    self.dump_file = open(path, 'wb')
    self._mark_generator = KeyGenerator()
示例4: __init__
def __init__(self, metadata_db):
    """Initialize the metadata logger.

    METADATA_DB is the store in which metadata records are kept, keyed
    by generated id."""
    self._metadata_db = metadata_db
    # A map { digest : id }:
    self._digest_to_id = {}
    # A key_generator to generate keys for metadata that haven't been
    # seen yet:
    self.key_generator = KeyGenerator()
示例5: ExternalBlobGenerator
class ExternalBlobGenerator(RevisionCollector):
    """Have generate_blobs.py output file revisions to a blob file."""

    def __init__(self, blob_filename):
        self.blob_filename = blob_filename

    def start(self):
        """Spawn generate_blobs.py with a pipe to its stdin."""
        self._mark_generator = KeyGenerator()
        logger.normal("Starting generate_blobs.py...")
        script = os.path.join(os.path.dirname(__file__), "generate_blobs.py")
        self._popen = subprocess.Popen(
            [sys.executable, script, self.blob_filename],
            stdin=subprocess.PIPE,
        )

    def _process_symbol(self, cvs_symbol, cvs_file_items):
        """Record the original source of CVS_SYMBOL.

        Determine the original revision source of CVS_SYMBOL, and store
        it as the symbol's revision_reader_token."""
        source = cvs_symbol.get_cvs_revision_source(cvs_file_items)
        cvs_symbol.revision_reader_token = source.revision_reader_token

    def process_file(self, cvs_file_items):
        """Assign blob marks for CVS_FILE_ITEMS and hand the file off to
        the generate_blobs.py child process."""
        marks = {}
        for lod_items in cvs_file_items.iter_lods():
            for cvs_rev in lod_items.cvs_revisions:
                if isinstance(cvs_rev, CVSRevisionDelete):
                    # Dead revisions get no blob and no mark.
                    continue
                mark = self._mark_generator.gen_id()
                cvs_rev.revision_reader_token = mark
                marks[cvs_rev.rev] = mark

        # A separate pickler is used for each dump(), so that its memo
        # doesn't grow very large.  The default ASCII protocol is used so
        # that this works without changes on systems that distinguish
        # between text and binary files.
        pickle.dump((cvs_file_items.cvs_file.rcs_path, marks), self._popen.stdin)
        self._popen.stdin.flush()

        # Now that all CVSRevisions' revision_reader_tokens are set,
        # propagate tokens to symbols from their original source
        # revisions:
        for lod_items in cvs_file_items.iter_lods():
            if lod_items.cvs_branch is not None:
                self._process_symbol(lod_items.cvs_branch, cvs_file_items)
            for cvs_tag in lod_items.cvs_tags:
                self._process_symbol(cvs_tag, cvs_file_items)

    def finish(self):
        """Close the pipe and wait for generate_blobs.py to terminate."""
        self._popen.stdin.close()
        logger.normal("Waiting for generate_blobs.py to finish...")
        returncode = self._popen.wait()
        if returncode:
            raise FatalError(
                "generate_blobs.py failed with return code %s." % (returncode,)
            )
        else:
            logger.normal("generate_blobs.py is done.")
示例6: MetadataLogger
class MetadataLogger:
    """Store and generate IDs for the metadata associated with CVSRevisions.

    We want CVSRevisions that might be able to be combined to have the
    same metadata ID, so we want a one-to-one relationship id <->
    metadata.  We could simply construct a map {metadata : id}, but the
    map would grow too large.  Therefore, we generate a digest
    containing the significant parts of the metadata, and construct a
    map {digest : id}.

    To get the ID for a new set of metadata, we first create the digest.
    If there is already an ID registered for that digest, we simply
    return it.  If not, we generate a new ID, store the metadata in the
    metadata database under that ID, record the mapping {digest : id},
    and return the new id.

    What metadata is included in the digest?  The author, log_msg,
    project_id (if Ctx().cross_project_commits is not set), and
    branch_name (if Ctx().cross_branch_commits is not set)."""

    def __init__(self, metadata_db):
        self._metadata_db = metadata_db
        # A map { digest : id }:
        self._digest_to_id = {}
        # A key_generator to generate keys for metadata that haven't been
        # seen yet:
        self.key_generator = KeyGenerator()

    def store(self, project, branch_name, author, log_msg):
        """Store the metadata and return its id.

        Locate the record for a commit with the specified (PROJECT,
        BRANCH_NAME, AUTHOR, LOG_MSG) and return its id.  (Depending on
        policy, not all of these items are necessarily used when creating
        the unique id.)  If there is no such record, create one and return
        its newly-generated id."""
        # hashlib replaces the long-deprecated (and Python 3-removed) sha
        # module; hashlib.sha1 produces identical digests, so existing
        # digest-to-id mappings are unaffected.
        import hashlib

        key = [author, log_msg]
        if not Ctx().cross_project_commits:
            key.append('%x' % project.id)
        if not Ctx().cross_branch_commits:
            key.append(branch_name or '')
        digest = hashlib.sha1('\0'.join(key)).digest()
        try:
            # See if it is already known:
            return self._digest_to_id[digest]
        except KeyError:
            # Renamed from 'id' to avoid shadowing the builtin.
            metadata_id = self.key_generator.gen_id()
            self._digest_to_id[digest] = metadata_id
            self._metadata_db[metadata_id] = Metadata(metadata_id, author, log_msg)
            return metadata_id
示例7: Substituter
class Substituter:
    """Map input values to generated substitute strings, consistently.

    Substitutes are produced lazily from TEMPLATE (which should contain
    one integer conversion, filled with a counter starting at 1); the
    same input value always yields the same substitute."""

    def __init__(self, template):
        self.template = template
        self.key_generator = KeyGenerator(1)
        # A map from old values to new ones.
        self.substitutions = {}

    def get_substitution(self, s):
        """Return the substitute for S, generating it on first use."""
        r = self.substitutions.get(s)
        # Use identity comparison for None (PEP 8): '== None' can be
        # hijacked by custom __eq__ implementations and is slower.
        if r is None:
            r = self.template % self.key_generator.gen_id()
            self.substitutions[s] = r
        return r
示例8: start
def start(self):
    """Launch generate_blobs.py, writing blobs to the configured file."""
    self._mark_generator = KeyGenerator()
    logger.normal('Starting generate_blobs.py...')
    # Use the configured blob file, or a managed temp file by default.
    blob_filename = (
        self.blob_filename
        if self.blob_filename is not None
        else artifact_manager.get_temp_file(config.GIT_BLOB_DATAFILE)
    )
    command = [
        sys.executable,
        os.path.join(os.path.dirname(__file__), 'generate_blobs.py'),
        blob_filename,
    ]
    self._pipe = subprocess.Popen(command, stdin=subprocess.PIPE)
示例9: open
def open(self):
    """Set up the RepositoryMirror and prepare it for commits."""
    self._key_generator = KeyGenerator()
    # LOD -> LODHistory instance, for every line of development that has
    # been referenced so far:
    self._lod_histories = {}
    # Analogous to the 'nodes' table in a Subversion fs.  (No
    # 'representations' or 'strings' tables are needed because only file
    # existence is tracked, not file contents.)
    self._node_db = _NodeDatabase()
    # Start at revision 0; there is no root node yet.
    self._youngest = 0
示例10: __init__
def __init__(
    self, dump_filename, revision_writer,
    max_merges=None, author_transforms=None,
):
    """Constructor.

    DUMP_FILENAME is the name of the file to which the git-fast-import
    commands for defining revisions should be written.  (Depending on
    the style of revision writer, the actual file contents might not be
    written to this file.)

    REVISION_WRITER is a GitRevisionWriter that is used to output either
    the content of revisions or a mark that was previously used to label
    a blob.

    MAX_MERGES may be an integer giving the maximum number of parents
    that can be merged into a commit at once (aside from the natural
    parent), or None for no limit.

    AUTHOR_TRANSFORMS is a map {cvsauthor : (fullname, email)} from CVS
    author names to git full name and email address.  All of the
    contents should either be Unicode strings or 8-bit strings encoded
    as UTF-8.
    """

    self.dump_filename = dump_filename
    self.revision_writer = revision_writer
    self.max_merges = max_merges

    def to_utf8(s):
        # Normalize Unicode strings to UTF-8-encoded byte strings.
        return s.encode('utf8') if isinstance(s, unicode) else s

    self.author_transforms = {}
    if author_transforms is not None:
        for cvsauthor, (name, email) in author_transforms.iteritems():
            self.author_transforms[to_utf8(cvsauthor)] = (
                to_utf8(name), to_utf8(email),
            )

    self._mirror = RepositoryMirror()
    self._mark_generator = KeyGenerator(GitOutputOption._first_commit_mark)
示例11: __init__
def __init__(
    self, revision_writer,
    dump_filename=None,
    author_transforms=None,
    tie_tag_fixup_branches=False,
):
    """Constructor.

    REVISION_WRITER is a GitRevisionWriter that is used to output either
    the content of revisions or a mark that was previously used to label
    a blob.

    DUMP_FILENAME is the name of the file to which the git-fast-import
    commands for defining revisions should be written.  (Depending on
    the style of revision writer, the actual file contents might not be
    written to this file.)  If it is None, the output is written to
    stdout.

    AUTHOR_TRANSFORMS is a map {cvsauthor : (fullname, email)} from CVS
    author names to git full name and email address.  All of the
    contents should either be Unicode strings or 8-bit strings encoded
    as UTF-8.

    TIE_TAG_FIXUP_BRANCHES tells whether, after finishing with a tag
    fixup branch, it should be pseudo-merged (ancestry linked but no
    content changes) back into its source branch, to dispose of the
    open head.
    """
    DVCSOutputOption.__init__(self)
    self.dump_filename = dump_filename
    self.revision_writer = revision_writer
    self.author_transforms = \
        self.normalize_author_transforms(author_transforms)
    self.tie_tag_fixup_branches = tie_tag_fixup_branches
    self._mark_generator = KeyGenerator(GitOutputOption._first_commit_mark)
示例12: open
def open(self):
    """Set up the SVNRepositoryMirror and prepare it for SVNCommits."""
    self._key_generator = KeyGenerator()
    self._delegates = [ ]
    # LOD -> LODHistory instance, for every line of development defined
    # so far:
    self._lod_histories = {}
    # Analogous to the 'nodes' table in a Subversion fs.  (No
    # 'representations' or 'strings' tables are needed because only
    # metadata is tracked, not file contents.)
    self._nodes_db = IndexedDatabase(
        artifact_manager.get_temp_file(config.SVN_MIRROR_NODES_STORE),
        artifact_manager.get_temp_file(config.SVN_MIRROR_NODES_INDEX_TABLE),
        DB_OPEN_NEW, serializer=_NodeSerializer()
    )
    # Start at revision 0 without a root node; it will be created by
    # _open_writable_root_node.
    self._youngest = 0
示例13: GitRevisionCollector
class GitRevisionCollector(RevisionCollector):
    """Output file revisions to git-fast-import."""

    def __init__(self, revision_reader, blob_filename=None):
        self.revision_reader = revision_reader
        self.blob_filename = blob_filename

    def register_artifacts(self, which_pass):
        """Register this collector's artifacts with the artifact manager."""
        self.revision_reader.register_artifacts(which_pass)
        if self.blob_filename is None:
            artifact_manager.register_temp_file(
                config.GIT_BLOB_DATAFILE, which_pass,
            )

    def start(self):
        """Start the revision reader and open the blob output file."""
        self.revision_reader.start()
        if self.blob_filename is not None:
            path = self.blob_filename
        else:
            path = artifact_manager.get_temp_file(config.GIT_BLOB_DATAFILE)
        self.dump_file = open(path, 'wb')
        self._mark_generator = KeyGenerator()

    def _process_revision(self, cvs_rev):
        """Write the revision fulltext to a blob if it is not dead."""
        if isinstance(cvs_rev, CVSRevisionDelete):
            # There is no need to record a delete revision, and its token
            # will never be needed:
            return

        # FIXME: We have to decide what to do about keyword substitution
        # and eol_style here:
        fulltext = self.revision_reader.get_content(cvs_rev)

        mark = self._mark_generator.gen_id()
        write = self.dump_file.write
        write('blob\n')
        write('mark :%d\n' % (mark,))
        write('data %d\n' % (len(fulltext),))
        write(fulltext)
        write('\n')
        cvs_rev.revision_reader_token = mark

    def _process_symbol(self, cvs_symbol, cvs_file_items):
        """Record the original source of CVS_SYMBOL.

        Determine the original revision source of CVS_SYMBOL, and store
        it as the symbol's revision_reader_token."""
        source = cvs_symbol.get_cvs_revision_source(cvs_file_items)
        cvs_symbol.revision_reader_token = source.revision_reader_token

    def process_file(self, cvs_file_items):
        """Emit blobs for all live revisions in CVS_FILE_ITEMS, then
        propagate tokens to the file's symbols."""
        for lod_items in cvs_file_items.iter_lods():
            for cvs_rev in lod_items.cvs_revisions:
                self._process_revision(cvs_rev)

        # Now that all CVSRevisions' revision_reader_tokens are set,
        # copy tokens to symbols from their original source revisions:
        for lod_items in cvs_file_items.iter_lods():
            if lod_items.cvs_branch is not None:
                self._process_symbol(lod_items.cvs_branch, cvs_file_items)
            for cvs_tag in lod_items.cvs_tags:
                self._process_symbol(cvs_tag, cvs_file_items)

    def finish(self):
        """Shut down the revision reader and close the blob file."""
        self.revision_reader.finish()
        self.dump_file.close()
示例14: SVNCommitCreator
class SVNCommitCreator:
    """This class creates and yields SVNCommits via process_changeset().

    Each new SVNCommit is assigned the next Subversion revision number
    from an internal generator."""
def __init__(self):
    """Initialize revision numbering and project-initialization state."""
    # The revision number to assign to the next new SVNCommit.
    self.revnum_generator = KeyGenerator()
    # A set containing the Projects that have already been
    # initialized:
    self._initialized_projects = set()
def _post_commit(self, cvs_revs, motivating_revnum, timestamp):
    """Generate any SVNCommits needed to follow CVS_REVS.

    That is, handle non-trunk default branches.  A revision on a CVS
    non-trunk default branch is visible in a default CVS checkout of
    HEAD.  So we copy such commits over to Subversion's trunk so that
    checking out SVN trunk gives the same output as checking out of
    CVS's default branch."""

    # Only non-trunk-default-branch, non-noop revisions are relevant:
    relevant_revs = [
        cvs_rev
        for cvs_rev in cvs_revs
        if cvs_rev.ntdbr and not isinstance(cvs_rev, CVSRevisionNoop)
    ]
    if not relevant_revs:
        return

    relevant_revs.sort(
        lambda a, b: cmp(a.cvs_file.rcs_path, b.cvs_file.rcs_path)
    )
    # Generate an SVNCommit for all of our default branch cvs_revs.
    yield SVNPostCommit(
        motivating_revnum, relevant_revs, timestamp,
        self.revnum_generator.gen_id(),
    )
def _process_revision_changeset(self, changeset, timestamp):
    """Process CHANGESET, using TIMESTAMP as the commit time.

    Create and yield one or more SVNCommits in the process.  CHANGESET
    must be an OrderedChangeset.  TIMESTAMP is used as the timestamp
    for any resulting SVNCommits."""

    if not changeset.cvs_item_ids:
        logger.warn('Changeset has no items: %r' % changeset)
        return

    logger.verbose('-' * 60)
    logger.verbose('CVS Revision grouping:')
    logger.verbose(' Time: %s' % time.ctime(timestamp))

    # Generate an SVNCommit unconditionally.  Even if the only change in
    # this group of CVSRevisions is a deletion of an already-deleted
    # file (that is, a CVS revision in state 'dead' whose predecessor
    # was also in state 'dead'), the conversion will still generate a
    # Subversion revision containing the log message for the second dead
    # revision, because we don't want to lose that information.
    cvs_revs = list(changeset.iter_cvs_items())
    if cvs_revs:
        cvs_revs.sort(
            lambda a, b: cmp(a.cvs_file.rcs_path, b.cvs_file.rcs_path)
        )
        svn_commit = SVNPrimaryCommit(
            cvs_revs, timestamp, self.revnum_generator.gen_id()
        )
        yield svn_commit

        for cvs_rev in cvs_revs:
            Ctx()._symbolings_logger.log_revision(cvs_rev, svn_commit.revnum)

        # Generate an SVNPostCommit if we have default branch revs.  If
        # some of the revisions in this commit happened on a non-trunk
        # default branch, then those files have to be copied into trunk
        # manually after being changed on the branch (because the RCS
        # "default branch" appears as head, i.e., trunk, in practice).
        # Unfortunately, Subversion doesn't support copies with sources
        # in the current txn.  All copies must be based in committed
        # revisions.  Therefore, we generate the copies in a new
        # revision.
        for post_commit in self._post_commit(
            cvs_revs, svn_commit.revnum, timestamp
        ):
            yield post_commit
def _process_tag_changeset(self, changeset, timestamp):
"""Process TagChangeset CHANGESET, producing a SVNTagCommit.
Filter out CVSTagNoops. If no CVSTags are left, don't generate a
SVNTagCommit."""
if Ctx().trunk_only:
raise InternalError(
'TagChangeset encountered during a --trunk-only conversion')
cvs_tag_ids = [
cvs_tag.id
for cvs_tag in changeset.iter_cvs_items()
if not isinstance(cvs_tag, CVSTagNoop)
]
if cvs_tag_ids:
#.........这里部分代码省略.........
示例15: GitOutputOption
class GitOutputOption(DVCSOutputOption):
    """An OutputOption that outputs to a git-fast-import formatted file.

    Members:

      dump_filename -- (string) the name of the file to which the
          git-fast-import commands for defining revisions will be
          written.

      author_transforms -- a map from CVS author names to git full name
          and email address.  See
          DVCSOutputOption.normalize_author_transforms() for information
          about the form of this parameter.
    """

    name = "Git"

    # The first mark number used for git-fast-import commit marks.  This
    # value needs to be large to avoid conflicts with blob marks.
    _first_commit_mark = 1000000000
def __init__(
    self, dump_filename, revision_writer,
    author_transforms=None,
    tie_tag_fixup_branches=False,
):
    """Constructor.

    DUMP_FILENAME is the name of the file to which the git-fast-import
    commands for defining revisions should be written.  (Depending on
    the style of revision writer, the actual file contents might not be
    written to this file.)

    REVISION_WRITER is a GitRevisionWriter that is used to output either
    the content of revisions or a mark that was previously used to label
    a blob.

    AUTHOR_TRANSFORMS is a map {cvsauthor : (fullname, email)} from CVS
    author names to git full name and email address.  All of the
    contents should either be Unicode strings or 8-bit strings encoded
    as UTF-8.

    TIE_TAG_FIXUP_BRANCHES tells whether, after finishing with a tag
    fixup branch, it should be pseudo-merged (ancestry linked but no
    content changes) back into its source branch, to dispose of the
    open head.
    """
    DVCSOutputOption.__init__(self)
    self.dump_filename = dump_filename
    self.revision_writer = revision_writer
    self.author_transforms = \
        self.normalize_author_transforms(author_transforms)
    self.tie_tag_fixup_branches = tie_tag_fixup_branches
    self._mark_generator = KeyGenerator(GitOutputOption._first_commit_mark)
def register_artifacts(self, which_pass):
    """Register artifacts for this option and its revision writer."""
    DVCSOutputOption.register_artifacts(self, which_pass)
    self.revision_writer.register_artifacts(which_pass)
def check_symbols(self, symbol_map):
    """Check SYMBOL_MAP for problems; currently a no-op for git."""
    # FIXME: What constraints does git impose on symbols?
    pass
def setup(self, svn_rev_count):
    """Open the dump file and initialize per-run bookkeeping state."""
    DVCSOutputOption.setup(self, svn_rev_count)
    self.f = open(self.dump_filename, 'wb')

    # The youngest revnum that has been committed so far:
    self._youngest = 0

    # A map {lod : [(revnum, mark)]} giving each of the revision
    # numbers in which there was a commit to lod, and the mark active
    # at the end of the revnum.
    self._marks = {}

    self.revision_writer.start(self._mirror, self.f)
def _create_commit_mark(self, lod, revnum):
    """Allocate a new commit mark, record it for LOD at REVNUM, and
    return it."""
    new_mark = self._mark_generator.gen_id()
    self._set_lod_mark(lod, revnum, new_mark)
    return new_mark
def _set_lod_mark(self, lod, revnum, mark):
"""Record MARK as the status of LOD for REVNUM.
If there is already an entry for REVNUM, overwrite it. If not,
append a new entry to the self._marks list for LOD."""
assert revnum >= self._youngest
entry = (revnum, mark)
try:
modifications = self._marks[lod]
except KeyError:
# This LOD hasn't appeared before; create a new list and add the
#.........这里部分代码省略.........