本文整理汇总了Python中sys.stderr.flush函数的典型用法代码示例。如果您正苦于以下问题:Python flush函数的具体用法?Python flush怎么用?Python flush使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了flush函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: parseGameMessage
def parseGameMessage(self, parts):
    """Route one tokenized engine message to the matching state update.

    parts[0] selects the target: the literal "game" updates global game
    state, a bot name updates that player's state; anything else is
    reported on stderr.
    """
    source = parts[0]
    if source == "game":
        field = parts[1]
        if field == 'round':
            # round arrives as an integer token
            self.round = int(parts[2])
        elif field == 'this_piece_type':
            self.this_piece_type = parts[2]
        elif field == 'next_piece_type':
            self.next_piece_type = parts[2]
        elif field == 'this_piece_position':
            # position arrives as "x,y"
            coords = parts[2].split(',')
            self.this_piece_position["x"] = int(coords[0])
            self.this_piece_position["y"] = int(coords[1])
        else:
            stderr.write('Unknown gameStateMessage: %s\n' % (parts[1]))
            stderr.flush()
    elif source == self.settings.bots["me"]:
        self.players["me"].parsePlayerState(parts[1:])
    elif source == self.settings.bots["opponent"]:
        self.players["opponent"].parsePlayerState(parts[1:])
    else:
        stderr.write('Unknown gameStateMessage: %s\n' % (parts[0]))
        stderr.flush()
示例2: log_error
def log_error(self, message, level=LOG_ERR):
    """Log *message* to stderr when its *level* passes the configured filter.

    Mirrors the logging method of the Request object. Because get_options
    reads configuration from os.environ -- which may not work under some
    WSGI servers such as Apache's mod_wsgi -- this method writes directly
    to stderr, so the messages will NOT be found in the server's error log.
    """
    # level is a (priority, label) pair; compare numeric priorities.
    # NOTE(review): self.__log_level / self.__name look name-mangled, i.e.
    # this was pasted out of a class body -- confirm against the original.
    if self.__log_level[0] >= level[0]:
        # Python 2 path: pre-encode unicode so the %-format below works on
        # a byte string. _unicode_exist is presumably set at import time.
        if _unicode_exist and isinstance(message, unicode):
            message = message.encode('utf-8')
        try:
            stderr.write("<%s> [%s] %s\n" % (level[1], self.__name,
                message))
        except UnicodeEncodeError:
            # stderr could not encode the message: degrade to ASCII with
            # backslash escapes, via the Py2 or Py3 route as available.
            if _unicode_exist:
                message = message.decode('utf-8').encode(
                    'ascii', 'backslashreplace')
            else:
                message = message.encode(
                    'ascii', 'backslashreplace').decode('ascii')
            stderr.write("<%s> [%s] %s\n" % (level[1], self.__name,
                message))
        stderr.flush()
示例3: main
def main(args):
    """Sanitize every unique PDF listed in args.files.

    Each file is read as bytes, optionally backed up as "<name>.OLD",
    passed through remove_evil_links, and written back in place.
    Unreadable files are reported on stderr and skipped.
    """
    try:
        # Deduplicate the input list in place on the args namespace.
        args.files = list(set(args.files))
        for file_path in args.files:
            if not file_path:
                continue
            if args.verbose:
                print("Processing: {0}".format(file_path))
            try:
                with open(file_path, "rb") as input_file:
                    pdf_data = input_file.read()
            except IOError as err:
                stderr.write("{0}: {1}\n".format(file_path, err.strerror))
                stderr.flush()
                continue
            if not args.no_backup:
                # Keep the original under a different name before rewriting.
                if args.verbose:
                    print("Creating backup: {0}.OLD".format(file_path))
                shutil.move(file_path, "{0}.OLD".format(file_path))
            new_pdf_data = remove_evil_links(pdf_data)
            with open(file_path, "wb") as out_file:
                out_file.write(new_pdf_data)
            if args.verbose:
                print("Saving modified file: {0}".format(file_path))
    except KeyboardInterrupt:
        raise
示例4: _encoder_transform
def _encoder_transform(X_s, layers, batch_range):
    """
    Parameters:
    ----------
    X_s: input data
    layers: neuron layers (input shape + hidden layers)
    batch_range: size of minibatch

    Returns:
    Input data with features as the most latent representation.
    """
    ae = autoencoder(dimensions=layers)
    optimizer = tf.train.AdamOptimizer(0.001).minimize(ae['cost'])
    sess = tf.Session()
    sess.run(tf.initialize_all_variables())
    # Train for a fixed 100 epochs over corrupted minibatches.
    for epoch_i in range(100):
        batch_starts = range(0, len(X_s), batch_range)
        batch_ends = range(batch_range, len(X_s), batch_range)
        for start, end in zip(batch_starts, batch_ends):
            sess.run(optimizer,
                     feed_dict={ae['x']: X_s[start:end],
                                ae['corrupt_prob']: [1.0]})
        # Report the full-dataset cost as an in-place progress line.
        cost_val = sess.run(ae['cost'],
                            feed_dict={ae['x']: X_s, ae['corrupt_prob']: [1.0]})
        stderr.write("\r Epoch: %d Cost: %f" % (epoch_i, cost_val))
        stderr.flush()
    # Encode without corruption to obtain the latent representation.
    Z_0 = sess.run(ae['z'], feed_dict={ae['x']: X_s, ae['corrupt_prob']: [0.0]})
    sess.close()
    return Z_0
示例5: main
def main(args):
    """Run the selected scraper engines, or all of them by default.

    Engines whose CLI flag is set run exclusively; with no flags set,
    every engine runs. Ctrl-C / EOF skips to the next engine.
    """
    engines = (
        {"name": "Napalm", "func": Napalm, "flag_set": args.napalm},
        {"name": "Mamont", "func": Mamont, "flag_set": args.mamont},
        {"name": "FileWatcher", "func": Filewatcher, "flag_set": args.filewatcher},
        {"name": "FileMare", "func": Filemare, "flag_set": args.filemare},
    )
    # Process -fw, -fm, -ma, -na flags if they are set
    selected = [engine for engine in engines if engine["flag_set"]]
    functions = selected if selected else engines
    # Start the scraping process
    for function in functions:
        try:
            stderr.write("\t-=[ {0} ]=-\n".format(function["name"]))
            stderr.flush()
            function["func"](args).search()
        except(KeyboardInterrupt, EOFError):
            continue
    stderr.write("\n")
    stderr.flush()
示例6: _album
def _album(self, source):
    """ Returns a list of image files from the passed source on success.

    Parses the album's collectionData JSON out of *source*, rewrites each
    image's fullsize URL to the "~original" resolution variant, and wraps
    each entry in an ImageInfo. Returns the string "End of album" when no
    image data can be extracted.
    """
    try:
        # Obtain the album image objects from the json data which is found
        # in the passed source
        if "<h2>All Categories</h2>" in source or "<a id=\"images\"" in source:
            j = self._get_var_collectionData(source, "search")
        else:
            j = self._get_var_collectionData(source)
        if not j:
            raise EOFError
        images = j.get("items").get("objects")
        if not images:
            raise EOFError
    except(EOFError):
        return "End of album"
    # Try to detect the first page and print the estimated file count
    # to stderr.
    if(j["pageNumber"] == 1):
        self._print_album_stats(source)
        stderr.flush()
    # Point each image at its original-resolution file.
    for obj in images:
        up = urlparse(obj.get("fullsizeUrl"))
        obj["originalUrl"] = "{0}~original".format(up.geturl())
    # BUG FIX: the original built an image_objects list in the loop above
    # and then immediately discarded it by rebuilding the whole list with
    # this comprehension, constructing every ImageInfo twice. Build once.
    return [ImageInfo(obj["name"], **obj) for obj in images if obj]
示例7: benchmark
def benchmark(args, commit, commits):
    """Check out *commit* and run the benchmark suite against it.

    Writes a "<index>/<total> <commit>" progress indicator to stderr,
    forces a checkout of the commit, runs the benchmarks, and returns a
    dict mapping benchmark name -> cpu_time. Returns an empty dict when
    the benchmark run fails (likely a commit predating the benchmarks).
    """
    # print progress indicator (carriage return keeps it on one line)
    stderr.write("\r{}/{} {}".format(commits.index(commit), len(commits), commit))
    stderr.flush()
    # checkout the given commit
    run_from_repo(args, ["git", "checkout", "--force", commit])
    try:
        # run the benchmarks and capture their CSV output as a stream
        stream = StringIO(run_benchmarks(args))
    except CalledProcessError:
        # FIX: the exception was bound to an unused name `e`; drop it.
        # failure likely means a commit from before the benchmarks existed
        return {}
    # decode stream as CSV table
    benchmarks = benchmarks_from_buffer(stream)
    # refine table
    report = table_from_benchmarks(benchmarks)
    # determine the columns to extract from the title row
    title_row = report[0]
    name_index = title_row.index('name')
    cpu_time_index = title_row.index('cpu_time')
    # map benchmark test name to its cpu_time result
    return {cell[name_index]: cell[cpu_time_index] for cell in report[1:]}
示例8: _get_output_dir
def _get_output_dir(self):
""" Returns the output directory, either the pwd or the directory
defined in the passed arguments.
Subalbums are given a subdirectory to store their images in.
"""
out = self._args.output_directory
if not out:
# Define the present working directory if it wasn't passed explicitly
# with the -o/--output-directory argument.
out = os.path.join(os.getcwd(), 'photobucket')
elif out.startswith("~"):
# Resolve the tilde char (which is the home directory on *nix) to
# it's actual destination.
home = os.environ.get("HOME")
if not home:
out = os.getcwd()
else:
out = os.path.join(home, out[1:])
if not os.path.isdir(out) and not os.path.isfile(out):
try:
os.makedirs(out)
except(OSError, IOError):
stderr.write("Failed to create output directory,",\
"does it already exist?\n")
stderr.flush()
exit(1)
# Add a trailing slash (or backslash) to the download directory, this is
# necessary otherwise we would get an error when trying to write the down-
# loaded file to the directory. (we want to write to file - not to the
# directory itself)
if not out.endswith(os.sep):
out += os.sep
return out
示例9: parseAdvisoryString
def parseAdvisoryString(wwaString):
    """Translate a '^'-separated watch/warning/advisory string to text.

    Each word has the form "<hazard>.<advisory>[...]"; both codes are
    looked up in DEFS['wwa']. Unknown codes are reported on stderr but
    skipped. Returns '<None>' when nothing could be translated.
    """
    advisoryString = ''
    words = wwaString.split('^')
    for word in words:
        if '<None>' in word:
            continue
        entries = word.split('.')
        # BUG FIX: a word without a '.' previously raised IndexError at
        # entries[1]; warn and skip malformed entries instead.
        if len(entries) < 2:
            stderr.write('WARNING: Malformed wwa entry: ' + word + '\n'); stderr.flush()
            continue
        hazard = entries[0]
        advisory = entries[1]
        if hazard in DEFS['wwa']['hazards']:
            advisoryString += DEFS['wwa']['hazards'][hazard] + ' '
        else:
            stderr.write('WARNING: Unknown hazard code: ' + hazard + '\n'); stderr.flush()
        if advisory in DEFS['wwa']['advisories']:
            advisoryString += DEFS['wwa']['advisories'][advisory] + '\n'
        else:
            stderr.write('WARNING: Unknown advisory code: ' + advisory + '\n'); stderr.flush()
    if len(advisoryString) == 0:
        advisoryString = '<None>'
    else:
        # Title-case the accumulated text for display.
        advisoryString = advisoryString.strip().title()
    return advisoryString
示例10: daemonize
def daemonize(self):
    """
    Forks the process(es) from the controlling terminal
    and redirects I/O streams for logging.

    Classic double-fork: fork, start a new session, fork again so the
    process can never reacquire a controlling tty, then dup the standard
    streams onto the configured files and record a pid file.
    """
    self.fork()            # first fork: detach from the parent
    chdir(getcwd())        # effectively a no-op; keeps the current cwd
    setsid()               # become session leader, drop the controlling tty
    umask(0)
    self.fork()            # second fork: prevent reacquiring a tty
    # Flush buffered output before the file descriptors are replaced.
    stdout.flush()
    stderr.flush()
    # Reopen the standard streams onto the configured files.
    # NOTE: `file()` is Python 2 only; the 0 makes stderr unbuffered.
    # NOTE(review): self.stdin is opened 'w+' -- daemons usually open
    # stdin read-only; confirm this is intentional.
    si= file(self.stdin, 'w+')
    so= file(self.stdout, 'a+')
    se= file(self.stderr, 'a+', 0)
    dup2(si.fileno(), stdin.fileno())
    dup2(so.fileno(), stdout.fileno())
    dup2(se.fileno(), stderr.fileno())
    # Presumably atexit.register: remove the pid file on exit.
    register(self.del_pid)
    self.set_pid()
示例11: _print_progress
def _print_progress(self, epoch, cost=None, train_acc=None,
valid_acc=None, time_interval=10):
if self.print_progress > 0:
s = '\rEpoch: %d/%d' % (epoch, self.epochs)
if cost is not None:
s += ' | Cost %.2f' % cost
if train_acc is not None:
s += ' | TrainAcc %.2f' % train_acc
if valid_acc is not None:
s += ' | ValidAcc %.2f' % valid_acc
if self.print_progress > 1:
if not hasattr(self, 'ela_str_'):
self.ela_str_ = '00:00:00'
if not epoch % time_interval:
ela_sec = time() - self.init_time_
self.ela_str_ = self._to_hhmmss(ela_sec)
s += ' | Elapsed: %s' % self.ela_str_
if self.print_progress > 2:
if not hasattr(self, 'eta_str_'):
self.eta_str_ = '00:00:00'
if not epoch % time_interval:
eta_sec = ((ela_sec / float(epoch)) *
self.epochs - ela_sec)
self.eta_str_ = self._to_hhmmss(eta_sec)
s += ' | ETA: %s' % self.eta_str_
stderr.write(s)
stderr.flush()
示例12: init
def init():
    """Initialize hitch in this directory."""
    # `which` exits non-zero when virtualenv is not on the PATH.
    if call(["which", "virtualenv"], stdout=PIPE):
        stderr.write("You must have python-virtualenv installed to use hitch.\n")
        stderr.flush()
        exit(1)
    # Refuse to initialize twice.
    if hitchdir.hitch_exists():
        stderr.write("Hitch has already been initialized in this directory.\n")
        stderr.flush()
        exit(1)
    makedirs(".hitch")
    call(["virtualenv", ".hitch/virtualenv", "--no-site-packages"])
    pip = path.abspath(path.join(".hitch", "virtualenv", "bin", "pip"))
    call([pip, "install", "-U", "pip"])
    # Install pinned requirements when present, otherwise the default package.
    requirements = "hitchreqs.txt"
    if path.exists(requirements):
        call([pip, "install", "-r", requirements])
    else:
        call([pip, "install", "hitchtest"])
    # Snapshot the resulting environment back into hitchreqs.txt.
    with open(requirements, "w") as hitchreqs_handle:
        hitchreqs_handle.write(check_output([pip, "freeze"]))
示例13: task_message
def task_message(task_id, escience_token, server_url, wait_timer, task='not_progress_bar'):
    """
    Function to check create and destroy celery tasks running from orka-CLI
    and log task state messages.

    Polls the server every *wait_timer* seconds while the job reports a
    'state'. New states are logged (or written raw to stderr when
    task == 'has_progress_bar'); unchanged states print a '.' heartbeat.
    Returns the job's 'success' payload, or logs the error and exits.
    """
    payload = {"job": {"task_id": task_id}}
    yarn_cluster_logger = ClusterRequest(escience_token, server_url, payload, action='job')
    # Placeholder guarantees the first real state compares as "changed".
    previous_response = {'job': {'state': 'placeholder'}}
    response = yarn_cluster_logger.retrieve()
    while 'state' in response['job']:
        # Compare states with carriage returns stripped so progress-bar
        # redraws of the same state are not treated as changes.
        if response['job']['state'].replace('\r','') != previous_response['job']['state'].replace('\r',''):
            if task == 'has_progress_bar':
                # Progress-bar states are drawn in place on stderr.
                stderr.write(u'{0}\r'.format(response['job']['state']))
                stderr.flush()
            else:
                # Reset the line, then log the new state at SUMMARY level.
                stderr.write('{0}'.format('\r'))
                logging.log(SUMMARY, '{0}'.format(response['job']['state']))
            previous_response = response
        else:
            # Heartbeat dot while the state is unchanged.
            stderr.write('{0}'.format('.'))
        sleep(wait_timer)
        response = yarn_cluster_logger.retrieve()
    stderr.flush()
    if 'success' in response['job']:
        stderr.write('{0}'.format('\r'))
        return response['job']['success']
    elif 'error' in response['job']:
        stderr.write('{0}'.format('\r'))
        logging.error(response['job']['error'])
        # error_fatal is presumably a module-level exit code constant.
        exit(error_fatal)
示例14: doCombination
def doCombination(self):
    """Build the combined signal+background and background-only pdfs.

    For multi-bin datacards a RooSimultaneous (or its optimized variant)
    is assembled over the bin category; a single bin is simply cloned.
    Optionally freezes all non-'r' RooRealVar parameters at the end.
    """
    ## Contrary to Number-counting models, here each channel PDF already contains the nuisances
    ## So we just have to build the combined pdf
    if len(self.DC.bins) > 1 or not self.options.forceNonSimPdf:
        # Build both variants: "" -> model_s (s+b), "_bonly" -> model_b.
        for (postfixIn,postfixOut) in [ ("","_s"), ("_bonly","_b") ]:
            # Optimized RooSimultaneousOpt unless explicitly disabled.
            simPdf = ROOT.RooSimultaneous("model"+postfixOut, "model"+postfixOut, self.out.binCat) if self.options.noOptimizePdf else ROOT.RooSimultaneousOpt("model"+postfixOut, "model"+postfixOut, self.out.binCat)
            for b in self.DC.bins:
                pdfi = self.out.pdf("pdf_bin%s%s" % (b,postfixIn))
                simPdf.addPdf(pdfi, b)
            # Attach nuisance constraint pdfs only in the optimized path.
            if len(self.DC.systs) and (not self.options.noOptimizePdf) and self.options.moreOptimizeSimPdf:
                simPdf.addExtraConstraints(self.out.nuisPdfs)
            if self.options.verbose:
                stderr.write("Importing combined pdf %s\n" % simPdf.GetName()); stderr.flush()
            self.out._import(simPdf)
            # Skip the background-only model when it was not requested.
            if self.options.noBOnly: break
    else:
        # Single-bin case: clone the channel pdf(s) directly.
        self.out._import(self.out.pdf("pdf_bin%s" % self.DC.bins[0]).clone("model_s"), ROOT.RooFit.Silence())
        if not self.options.noBOnly:
            self.out._import(self.out.pdf("pdf_bin%s_bonly" % self.DC.bins[0]).clone("model_b"), ROOT.RooFit.Silence())
    if self.options.fixpars:
        # Freeze every RooRealVar except the signal strength 'r'.
        # NOTE(review): `iter` shadows the builtin here.
        pars = self.out.pdf("model_s").getParameters(self.out.obs)
        iter = pars.createIterator()
        while True:
            arg = iter.Next()
            if arg == None: break;
            if arg.InheritsFrom("RooRealVar") and arg.GetName() != "r":
                arg.setConstant(True);
示例15: _print_progress
def _print_progress(self, iteration, n_iter,
cost=None, time_interval=10):
if self.print_progress > 0:
s = '\rIteration: %d/%d' % (iteration, n_iter)
if cost:
s += ' | Cost %.2f' % cost
if self.print_progress > 1:
if not hasattr(self, 'ela_str_'):
self.ela_str_ = '00:00:00'
if not iteration % time_interval:
ela_sec = time() - self._init_time
self.ela_str_ = self._to_hhmmss(ela_sec)
s += ' | Elapsed: %s' % self.ela_str_
if self.print_progress > 2:
if not hasattr(self, 'eta_str_'):
self.eta_str_ = '00:00:00'
if not iteration % time_interval:
eta_sec = ((ela_sec / float(iteration)) *
n_iter - ela_sec)
if eta_sec < 0.0:
eta_sec = 0.0
self.eta_str_ = self._to_hhmmss(eta_sec)
s += ' | ETA: %s' % self.eta_str_
stderr.write(s)
stderr.flush()