This article collects typical usage examples of the Python function pycvsanaly2.utils.printerr. If you are wondering what printerr does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
15 code examples of the printerr function are shown below, sorted by popularity by default.
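For context: printerr is a small logging helper from pycvsanaly2.utils. Its implementation is not shown on this page, but the call sites below suggest a printf-style signature, printerr(format_string, args_tuple), writing to stderr. A minimal sketch of such a helper (an assumption inferred from the call sites, not the library's actual code) might look like this:

import sys

def printerr(msg, args=None):
    # Apply printf-style formatting when an argument tuple is given,
    # then write the message to stderr and flush immediately.
    if args is not None:
        msg = msg % args
    sys.stderr.write(msg + '\n')
    sys.stderr.flush()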
Example 1: get_line_types
def get_line_types(repo, repo_uri, rev, path):
    """Returns an array, where each item represents a line of code.
    Each item is labeled 'code', 'comment' or 'empty'."""
    # profiler_start("Processing LineTypes for revision %s:%s", (self.rev, self.file_path))
    uri = os.path.join(repo_uri, path)  # join repo_uri and file_path into a full path
    file_content = _get_file_content(repo, uri, rev)
    if file_content is None or file_content == '':
        printerr("[get_line_types] Error: No file content for " + str(rev) + ":" + str(path) + " found! Skipping.")
        line_types = None
    else:
        try:
            lexer = get_lexer_for_filename(path)
        except ClassNotFound:
            try:
                printdbg("[get_line_types] Guessing lexer for " + str(rev) + ":" + str(path) + ".")
                lexer = guess_lexer(file_content)
            except ClassNotFound:
                printdbg("[get_line_types] No lexer found or guessed for " + str(rev) + ":" + str(path) + ". Using TextLexer instead.")
                lexer = TextLexer()
        if isinstance(lexer, NemerleLexer):
            # This lexer is broken and yields an unstoppable process; see
            # https://bitbucket.org/birkenfeld/pygments-main/issue/706/nemerle-lexer-ends-in-an-infinite-loop
            lexer = TextLexer()
        # Not sure whether this should be skipped when the language uses off-side rules
        # (e.g. Python; see http://en.wikipedia.org/wiki/Off-side_rule for a list)
        stripped_code = _strip_lines(file_content)
        lexer_output = _iterate_lexer_output(lexer.get_tokens(stripped_code))
        line_types_str = _comment_empty_or_code(lexer_output)
        line_types = line_types_str.split("\n")
    return line_types
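The returned list can be aggregated directly. For instance, a comment-density metric could be computed as follows (a hypothetical snippet; repo, repo_uri, rev, and path must come from a real CVSAnalY session):

from collections import Counter

line_types = get_line_types(repo, repo_uri, rev, path)
if line_types is not None:
    counts = Counter(line_types)  # e.g. {'code': 120, 'comment': 30, 'empty': 10}
    density = counts['comment'] / float(len(line_types))
    print("comment density: %.2f" % density)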
Example 2: listen_for_data
def listen_for_data(self, repo_func, watcher):
    def write_line(data, io):
        io.write(data)

    io = BytesIO()
    wid = self.repo.add_watch(watcher, write_line, io)
    # Git doesn't need retries because all of the revisions
    # are already on disk
    if self.repo_type == "git":
        retries = 0
    else:
        retries = 3
    done = False
    failed = False
    # Try downloading the file revision
    while not done and not failed:
        try:
            repo_func(os.path.join(self.repo_uri, self.path), self.rev)
            done = True
        except RepositoryCommandError as e:
            if retries > 0:
                printerr("Command %s returned %d(%s), try again",
                         (e.cmd, e.returncode, e.error))
                retries -= 1
                io.seek(0)
            elif retries == 0:
                failed = True
                printerr("Error obtaining %s@%s. Command %s returned %d(%s)",
                         (self.path, self.rev, e.cmd, e.returncode, e.error))
        except:
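The retry loop above is a pattern that recurs in several of these examples: rewind the output buffer and retry a fixed number of times before giving up. Stripped of the repository machinery, the skeleton looks roughly like this (a self-contained sketch, with fetch standing in for any failing operation; note that unlike the original, it also truncates the buffer between attempts):

from io import BytesIO

def fetch_with_retries(fetch, retries=3):
    io = BytesIO()
    while True:
        try:
            fetch(io)
            return io.getvalue()
        except IOError:
            if retries <= 0:
                raise
            retries -= 1
            io.seek(0)      # rewind so the next attempt overwrites partial data
            io.truncate(0)  # discard whatever the failed attempt wrote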
Example 3: get_patch_for_commit
def get_patch_for_commit(self):
    def diff_line(data, io):
        io.write(data)

    io = BytesIO()
    wid = self.repo.add_watch(DIFF, diff_line, io)
    done = False
    failed = False
    retries = 3
    while not done and not failed:
        try:
            self.repo.show(self.repo_uri, self.rev)
            self.data = to_utf8(io.getvalue().strip()).decode("utf-8")
            done = True
        except (CommandError, CommandRunningError) as e:
            if retries > 0:
                printerr("Error running show command: %s, trying again",
                         (str(e),))
                retries -= 1
                io.seek(0)
            elif retries <= 0:
                failed = True
                printerr("Error running show command: %s, FAILED",
                         (str(e),))
                self.data = None
    self.repo.remove_watch(DIFF, wid)
    return self.data
Example 4: __init__
def __init__(self, repo, uri):
    LineCounter.__init__(self, repo, uri)
    self.commit_pattern = re.compile(r"^(\w+) ")
    self.file_pattern = re.compile(r"^(\d+)\s+(\d+)\s+([^\s].*)$")
    # Dictionary for storing (added, removed) pairs, keyed by commit.
    self.lines = {}
    # Dictionary for storing lists of paths, keyed by commit.
    self.paths = {}
    # Dictionary for storing (added, removed) pairs, keyed by commit
    # and path.
    self.lines_files = {}
    # Run git command
    self.git = find_program('git')
    if self.git is None:
        raise ExtensionRunError("Error running CommitsLOCDet extension: " +
                                "required git command cannot be found in path")
    cmd = [self.git, 'log',
           '--all', '--topo-order', '--numstat', '--pretty=oneline']
    c = Command(cmd, uri)
    try:
        c.run(parser_out_func=self.__parse_line)
    except CommandError as e:
        if e.error:
            printerr("Error running git log command: %s", (e.error,))
        raise ExtensionRunError("Error running " +
                                "CommitsLOCDet extension: %s" % str(e))
Example 5: _do_backout
def _do_backout(self, repo, uri, db, backout_statement):
    connection = db.connect()
    repo_cursor = connection.cursor()
    repo_uri = get_repo_uri(uri, repo)
    try:
        repo_id = get_repo_id(repo_uri, repo_cursor, db)
    except RepoNotFound:
        # Repository isn't in there, so it's likely already backed out
        printerr("Repository not found, is it in the database?")
        return True
    finally:
        repo_cursor.close()

    update_cursor = connection.cursor()
    execute_statement(statement(backout_statement, db.place_holder),
                      (repo_id,),
                      update_cursor,
                      db,
                      "Couldn't backout extension",
                      exception=ExtensionBackoutError)
    update_cursor.close()
    connection.commit()
    connection.close()
Example 6: run
def run(self, repo, repo_uri):
    profiler_start("Running BlameJob for %s@%s", (self.path, self.rev))

    def blame_line(line, p):
        p.feed(line)

    repo_type = repo.get_type()
    if repo_type == 'cvs':
        # CVS paths contain the module stuff
        uri = repo.get_uri_for_path(repo_uri)
        module = uri[len(repo.get_uri()):].strip('/')
        if module != '.':
            path = self.path[len(module):].strip('/')
        else:
            path = self.path.strip('/')
    else:
        path = self.path.strip('/')
    filename = os.path.basename(self.path)
    p = create_parser(repo.get_type(), self.path)
    out = self.get_content_handler()
    p.set_output_device(out)
    wid = repo.add_watch(BLAME, blame_line, p)
    try:
        repo.blame(os.path.join(repo_uri, path), self.rev)
        self.collect_results(out)
    except RepositoryCommandError as e:
        self.failed = True
        printerr("Command %s returned %d (%s)", (e.cmd, e.returncode, e.error))
Example 7: __prepare_table
def __prepare_table(self, connection, drop_table=False):
    cursor = connection.cursor()
    # Drop the table's old data
    if drop_table:
        try:
            cursor.execute("DROP TABLE hunks")
        except Exception as e:
            printerr("Couldn't drop hunks table because %s", (e,))
Example 8: __prepare_table
def __prepare_table(self, connection, drop_table=False):
    # Drop the table's old data
    if drop_table:
        cursor = connection.cursor()
        try:
            cursor.execute("DROP TABLE content")
        except Exception as e:
            printerr("Couldn't drop content table because %s", (e,))
        finally:
Example 9: get_extension
def get_extension(extension_name):
    if extension_name not in _extensions:
        try:
            __import__("pycvsanaly2.extensions.%s" % extension_name)
        except ImportError as e:
            printerr("Error importing extension %s: %s", (extension_name, str(e)))
    if extension_name not in _extensions:
        raise ExtensionUnknownError('Extension %s not registered' % extension_name)
    return _extensions[extension_name]
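get_extension relies on each extension module registering itself in the _extensions dictionary as a side effect of being imported. A minimal sketch of that registry pattern (hypothetical names; a register_extension helper is assumed here, not taken from this page):

_extensions = {}

def register_extension(name, extension_class):
    # Called by each extension module at import time; importing
    # pycvsanaly2.extensions.<name> populates the registry as a side effect.
    _extensions[name] = extension_class

# At the bottom of a hypothetical extension module:
class HunksExtension(object):
    def run(self, repo, uri, db):
        pass

register_extension("Hunks", HunksExtension)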
Example 10: get_patch_for_commit
def get_patch_for_commit(self, rev):
    def diff_line(data, io):
        io.write(data)

    io = StringIO()
    wid = self.repo.add_watch(DIFF, diff_line, io)
    try:
        self.repo.show(self.repo_uri, rev)
        data = io.getvalue()
    except Exception as e:
        printerr("Error running show command: %s", (str(e),))
        data = None
Example 11: iter_file_patch
def iter_file_patch(iter_lines, allow_dirty=False):
    '''
    :arg iter_lines: iterable of lines to parse for patches
    :kwarg allow_dirty: If True, allow comments and other non-patch text
        before the first patch. Note that the algorithm here can only find
        such text before any patches have been found. Comments after the
        first patch are stripped away in iter_hunks() if it is also passed
        allow_dirty=True. Default False.
    '''
    ### FIXME: The docstring is not quite true. We allow certain comments no
    # matter what, if they start with '===', '***', or '#'. Someone should
    # re-examine this logic and decide whether to include those in
    # allow_dirty or restrict them to only appearing before the patch is
    # found (as allow_dirty does).
    regex = re.compile(binary_files_re)
    saved_lines = []
    orig_range = 0
    beginning = True
    for line in iter_lines:
        if line.startswith('=== ') or line.startswith('*** '):
            continue
        if line.startswith('#'):
            continue
        elif orig_range > 0:
            if line.startswith('-') or line.startswith(' '):
                orig_range -= 1
        elif line.startswith('--- ') or regex.match(line):
            if allow_dirty and beginning:
                # Patches can have "junk" at the beginning.
                # Stripping junk from the end of patches is handled when we
                # parse the patch.
                beginning = False
            elif len(saved_lines) > 0:
                yield saved_lines
            saved_lines = []
        elif line.startswith('@@'):
            try:
                hunk = hunk_from_header(line)
            except MalformedHunkHeader as e:
                if allow_dirty:
                    printerr("\nError: MalformedHunkHeader; probably a merge commit. Skipping.")
                    continue
                raise e
            orig_range = hunk.orig_range
        saved_lines.append(line)
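The generator's core idea is that a "--- " header starts a new per-file patch, so accumulated lines are flushed whenever one appears. Since iter_file_patch depends on hunk_from_header and binary_files_re from its surrounding module, here is a trimmed-down, self-contained sketch of just that splitting idea:

def split_file_patches(lines):
    # Simplified version of the pattern above: flush the accumulated
    # lines every time a new "--- " file header is seen.
    saved = []
    for line in lines:
        if line.startswith('--- ') and saved:
            yield saved
            saved = []
        saved.append(line)
    if saved:
        yield saved

diff = [
    "--- a/foo.py\n", "+++ b/foo.py\n", "@@ -1,1 +1,1 @@\n", "-x = 1\n", "+x = 2\n",
    "--- a/bar.py\n", "+++ b/bar.py\n", "@@ -1,1 +1,1 @@\n", "-y = 1\n", "+y = 2\n",
]
print(len(list(split_file_patches(diff))))  # -> 2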
Example 12: _get_file_content
def _get_file_content(repo, uri, rev):
    """Reads the content of a file at a given revision from a repository"""
    def write_line(data, io):
        io.write(data)

    io = BytesIO()
    wid = repo.add_watch(CAT, write_line, io)
    try:
        repo.cat(uri, rev)
        file_content = to_utf8(io.getvalue()).decode("utf-8")
        file_content = _convert_linebreaks(file_content)  # make sure we have consistent line breaks
    except Exception as e:
        printerr("[get_line_types] Error running show command: %s, FAILED", (str(e),))
        file_content = None
    repo.remove_watch(CAT, wid)
    return file_content
Example 13: __init__
def __init__(self, repo, uri):
    LineCounter.__init__(self, repo, uri)
    self.git = find_program('git')
    if self.git is None:
        raise ExtensionRunError("Error running CommitsLOC extension: " +
                                "required git command cannot be found in path")
    self.lines = {}
    cmd = [self.git, 'log', '--all', '--topo-order', '--shortstat', '--pretty=oneline', 'origin']
    c = Command(cmd, uri)
    try:
        c.run(parser_out_func=self.__parse_line)
    except CommandError as e:
        if e.error:
            printerr("Error running git log command: %s", (e.error,))
        raise ExtensionRunError("Error running CommitsLOC extension: %s" % str(e))
Example 14: __process_finished_jobs
def __process_finished_jobs(self, job_pool, connection, db):
    if isinstance(self.db, SqliteDatabase):
        from sqlite3 import IntegrityError
    elif isinstance(self.db, MysqlDatabase):
        from MySQLdb import IntegrityError
    write_cursor = connection.cursor()
    finished_job = job_pool.get_next_done(0)
    processed_jobs = 0
    # commit_id is the commit ID. For some reason, the
    # documentation advocates tablename_id as the reference,
    # but in the source these are referred to as commit IDs.
    # Don't ask me why!
    while finished_job is not None:
        query = """
            insert into content(commit_id, file_id, content, loc, size)
            values(?,?,?,?,?)"""
        insert_statement = statement(query, db.place_holder)
        parameters = (
            finished_job.commit_id,
            finished_job.file_id,
            finished_job.file_contents,
            finished_job.file_number_of_lines,
            finished_job.file_size,
        )
        try:
            write_cursor.execute(insert_statement, parameters)
        except IntegrityError as e:
            if isinstance(self.db, MysqlDatabase) and e.args[0] == 1062:
                # Ignore duplicate entry
                pass
            else:
                printerr(
                    "Error while inserting content for file %d @ commit %d"
                    % (finished_job.file_id, finished_job.commit_id)
                )
                raise
        processed_jobs += 1
        finished_job = job_pool.get_next_done(0)
    connection.commit()
    write_cursor.close()
    return processed_jobs
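The duplicate-handling branch above is database-specific: MySQL reports duplicate keys as error code 1062, while SQLite raises sqlite3.IntegrityError without that numeric convention. A self-contained illustration of the same pattern using an in-memory SQLite database:

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE content (commit_id INTEGER, file_id INTEGER, "
            "PRIMARY KEY (commit_id, file_id))")
cur.execute("INSERT INTO content VALUES (?, ?)", (1, 1))
try:
    cur.execute("INSERT INTO content VALUES (?, ?)", (1, 1))  # duplicate key
except sqlite3.IntegrityError as e:
    print("duplicate ignored: %s" % e)  # stands in for the printerr branch
conn.commit()
conn.close()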
Example 15: iter_hunks
def iter_hunks(iter_lines, allow_dirty=False):
    '''
    :arg iter_lines: iterable of lines to parse for hunks
    :kwarg allow_dirty: If True, when we encounter something that is not
        a hunk header while looking for one, assume the rest of the lines
        are not part of the patch (comments or other junk). Default False.
    '''
    hunk = None
    for line in iter_lines:
        if line == "\n":
            if hunk is not None:
                yield hunk
                hunk = None
            continue
        if hunk is not None:
            yield hunk
        try:
            hunk = hunk_from_header(line)
        except MalformedHunkHeader:
            if allow_dirty:
                # If the line isn't a hunk header, then we've reached the end
                # of this patch and there's "junk" at the end. Ignore the
                # rest of this patch.
                return
            raise
        orig_size = 0
        mod_size = 0
        while orig_size < hunk.orig_range or mod_size < hunk.mod_range:
            try:
                hunk_line = parse_line(next(iter_lines))
                hunk.lines.append(hunk_line)
                if isinstance(hunk_line, (RemoveLine, ContextLine)):
                    orig_size += 1
                if isinstance(hunk_line, (InsertLine, ContextLine)):
                    mod_size += 1
            except StopIteration:
                break
            except MalformedLine as e:
                if allow_dirty:
                    printerr("\nError: MalformedLine; probably a binary file. Skipping line.")
                    continue
                raise e
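hunk_from_header and parse_line belong to the same patch-parsing module (originally from bzrlib) and raise MalformedHunkHeader and MalformedLine respectively. As a rough sketch of what parsing a unified-diff hunk header involves (an illustration under standard unified-diff syntax, not the module's actual implementation):

import re

HUNK_HEADER = re.compile(r"^@@ -(\d+)(?:,(\d+))? \+(\d+)(?:,(\d+))? @@")

def parse_hunk_header(line):
    # Returns (orig_start, orig_range, mod_start, mod_range), or raises
    # ValueError, mirroring what MalformedHunkHeader signals in the real module.
    m = HUNK_HEADER.match(line)
    if m is None:
        raise ValueError("Malformed hunk header: %r" % line)
    orig_start, orig_range, mod_start, mod_range = m.groups()
    return (int(orig_start), int(orig_range or 1),
            int(mod_start), int(mod_range or 1))

print(parse_hunk_header("@@ -12,8 +12,9 @@\n"))  # -> (12, 8, 12, 9)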