This article collects typical usage examples of the Python method decoder.Decoder.decode. If you are asking yourself what exactly Decoder.decode does, how to call it, or what real code using it looks like, the curated examples below should help. You can also look further into usage examples for the containing class, decoder.Decoder.
The following 15 code examples of Decoder.decode are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: JsonUnmarshaler
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
class JsonUnmarshaler(object):
    """The top-level Unmarshaler used by the Reader for JSON payloads. While
    you may use this directly, it is strongly discouraged.
    """

    def __init__(self):
        self.decoder = Decoder()

    def load(self, stream):
        return self.decoder.decode(json.load(stream, object_pairs_hook=OrderedDict))

    def loadeach(self, stream):
        for o in sosjson.items(stream, object_pairs_hook=OrderedDict):
            yield self.decoder.decode(o)
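As a rough illustration, a JsonUnmarshaler like the one above could be driven with an in-memory stream. This is only a sketch, not part of the original example: it assumes Python 3, that the project's decoder module (and sosjson for loadeach) is importable, and the decoded result depends entirely on that Decoder's rules.

import io

# Hypothetical usage; the payload is a placeholder.
unmarshaler = JsonUnmarshaler()
stream = io.StringIO('{"id": 1, "tags": ["a", "b"]}')
value = unmarshaler.load(stream)
print(value)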
Example 2: MsgPackUnmarshaler
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
class MsgPackUnmarshaler(object):
    """The top-level Unmarshaler used by the Reader for MsgPack payloads.
    While you may use this directly, it is strongly discouraged.
    """

    def __init__(self):
        self.decoder = Decoder()
        self.unpacker = msgpack.Unpacker(object_pairs_hook=OrderedDict)

    def load(self, stream):
        return self.decoder.decode(msgpack.load(stream, object_pairs_hook=OrderedDict))

    def loadeach(self, stream):
        for o in self.unpacker:
            yield self.decoder.decode(o)
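Analogously, a minimal round trip might look like the sketch below. It assumes the msgpack package and the project's decoder module are available; the packed map is a placeholder and what load() returns for it depends on this project's Decoder.

import io
import msgpack

# Hypothetical round trip: pack a map, then hand the byte stream to load().
unmarshaler = MsgPackUnmarshaler()
packed = msgpack.packb({"id": 1, "tags": ["a", "b"]})
value = unmarshaler.load(io.BytesIO(packed))
print(value)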
Example 3: get_n_best_lists
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def get_n_best_lists(initial_params, n, args):
    sys.stderr.write("Getting n best lists...\n")
    num_songs_translated = 0
    n_best_lists = {}
    i = 0
    f = open(args.training_songs, "r")
    for path in f:
        path = path.strip()
        if not path:
            continue
        training_song = converter.parse(path)
        num_songs_translated += 1
        transpose(training_song, "C")
        sys.stderr.write("transposed " + path + "\n")
        lm = LanguageModel(args.harmony, "%s/%s_language_model.txt" % (args.model_directory, args.harmony))
        tms = []
        melodies = args.melodies.split(",")
        for melody in melodies:
            phrases = "%s/%s_%s_translation_model_rhythm.txt" % (args.model_directory, melody, args.harmony)
            notes = "%s/%s_%s_translation_model.txt" % (args.model_directory, melody, args.harmony)
            tm = TranslationModel(melody, args.harmony, phrases, notes)
            tms.append(tm)
        d = Decoder([(melody, training_song.parts[melody]) for melody in melodies],
                    lm, tms,
                    tm_phrase_weight=initial_params[0], tm_notes_weight=initial_params[1],
                    lm_weight=initial_params[2])
        try:
            hyps = d.decode(n)
            n_best_lists[path] = hyps
            sys.stderr.write("decoded " + path + "\n")
            i += 1
        except Exception as e:
            sys.stderr.write(str(e))
    return n_best_lists
Example 4: main
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def main():
    dbc = DBController()
    dec = Decoder([TLEDecoder(), TLEListDecoder()])
    dlc = None
    try:
        dlc = Downloader()
    except DownloaderError as e:
        print("failed to initialize downloader: " + str(e))
        sys.exit(1)

    for esat in dlc.get_data():
        sats = []
        try:
            sats = dec.decode(esat.fmt, esat.data)
        except DecoderError as e:
            print("failed to decode: " + str(e))

        try:
            for sat in sats:
                dbc.add(sat)
            dbc.sync()
        except DBError as e:
            print("failed to insert into db: " + str(e))
Example 5: parse_contents
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def parse_contents(f, bytes_to_read):
    '''
    Do complex reading of caption data from binary file.
    Return a list of statements and characters
    '''
    if DEBUG:
        print 'going to read {bytes} bytes in binary file caption statement.'.format(bytes=bytes_to_read)
    statements = []
    bytes_read = 0
    # TODO: Check whether decoder state is carried between packet processing.
    # Currently we recreate the decoder (and therefore reset its state)
    # on every packet payload processing. This may be incorrect.
    decoder = Decoder()
    line = ''
    while bytes_read < bytes_to_read:
        statement = decoder.decode(f)
        if statement:
            bytes_read += len(statement)
            statements.append(statement)
        #if isinstance(statement, code_set.Kanji) or isinstance(statement, code_set.Alphanumeric) \
        #        or isinstance(statement, code_set.Hiragana) or isinstance(statement, code_set.Katakana):
        #    if DEBUG:
        #        print statement  # just dump to stdout for now
        #    line += str(statement)
    #if len(line) > 0:
    #    print '{l}\n'.format(l=line)
    return statements
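A possible way to call parse_contents is sketched below. It assumes the enclosing module's DEBUG flag and Decoder import are in place and that the caller already knows how many bytes the caption statement occupies; the file name and byte count are placeholders, not values from the original example.

# Hypothetical driver; 'captions.bin' and the 188-byte length are made up.
with open('captions.bin', 'rb') as f:
    statements = parse_contents(f, bytes_to_read=188)
    for statement in statements:
        print statement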
Example 6: decode
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def decode(self):
    encoded_content = open(self.args.input, 'rb').read()
    mappings = json.loads(open(self.args.mappings, 'r').read())
    decoder = Decoder(encoded_content, mappings)
    raw_content = decoder.decode()
    with open(self.args.output, 'w') as f:
        f.write(raw_content)
Example 7: encode_decode
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def encode_decode(self, k):
    print "\nTesting encoding and then decoding with k = %s" % k
    md5 = hashlib.md5()
    with FileChunker(k, SYMBOLSIZE, DEFAULT_FILE) as chunker:
        chunk = chunker.chunk()
        while chunk:
            padding = chunk.padding
            symbols = [(i, chunk[i]) for i in xrange(k)]
            encoder = Encoder(k, symbols)
            symbols = []
            # Produce 2k symbols so the decoder sees a mix
            # of parity and source symbols
            for i in xrange(k * 2):
                symbols.append(encoder.next())
            encoder = None

            decoder = Decoder(k)
            for tup in symbols:
                decoder.append(tup)
            decoder.decode()

            decoded = bytearray()
            for i in xrange(k):
                esi, s = decoder.next()
                decoded += s.tostring()
            decoder = None

            if padding:
                padding = 0 - padding
                print "Removing padding", padding, "bytes"
                decoded = decoded[:padding]
            md5.update(decoded)

            # Continue on to the next chunk
            chunk = chunker.chunk()

    print "Original digest:", self.original_digest
    print "Decoded digest:", md5.hexdigest()
    return self.original_digest == md5.hexdigest()
Example 8: decodeAndSolve
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def decodeAndSolve(image, showSolution=False):
    d = Decoder(image)
    d.decode()
    s = SuDoKu(d.puzzle)
    solution = s.solution()
    if showSolution:
        img = copy(d.puzzleImage)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        for q, p in ((x, y) for x in (i * 100 + 30 for i in range(9)) for y in (i * 100 + 70 for i in range(9))):
            if ((q - 30) / 100, (p - 70) / 100) not in d.numberLocations:
                cv2.putText(
                    img,
                    str(solution[(q - 30) / 100][(p - 70) / 100]),
                    (q, p),
                    cv2.FONT_HERSHEY_PLAIN,
                    4,
                    (0, 150, 0),
                    thickness=6,
                )
        cv2.imshow("Solution - Press any key to exit.", img)
        cv2.waitKey(0)
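For reference, a call might look like the line below; the image argument is whatever this project's Decoder constructor expects (an image of a puzzle grid is assumed here) and the file name is a placeholder.

# Hypothetical invocation; "sudoku.jpg" is a made-up puzzle image.
decodeAndSolve("sudoku.jpg", showSolution=True)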
Example 9: run
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def run(file_name):
    decoder = Decoder('{}.png'.format(file_name))
    pixel_lines = [
        create_pixels(decoder.Pixel, scanline, decoder.bytes_per_pixel)
        for scanline in decoder.decode()
    ]

    c = Classifier()
    color_lines = []
    for line in pixel_lines:
        colors = [
            c.classify(Point(pixel.red, pixel.green, pixel.blue, pixel.alpha))
            for pixel in line
        ]
        color_lines.append(colors)

    with open('{}_colors.json'.format(file_name), 'w') as file:
        file.write(json.dumps(color_lines))
Example 10: simulate
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def simulate(self, mode=Decoder.SUM_PROD):
    """
    :param mode: The algorithm (sum-prod vs max-prod) to use in the decoder simulations
    :return:
    """
    self.mode = mode
    self.codewords = []
    self.decoded = []
    for var in self.variance_levels:
        codewords = []
        decoded = []
        transmissions = []
        decoder = Decoder(var, self.mode)
        for i in range(0, self.iterations):
            code = codeword.Codeword()
            codewords.append(code.codeword)
            decoded.append(decoder.decode(code.transmit(var)))
            transmissions.append(code.transmission)
        self.codewords.append(codewords)
        self.decoded.append(decoded)
        self.transmissions.append(transmissions)
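A driver for this method might look roughly like the snippet below. Simulation is a stand-in name for whatever class defines simulate(), its constructor signature is invented for illustration, and the variance_levels and iterations attributes are assumed to be set there.

# Hypothetical driver (class name and constructor are placeholders).
sim = Simulation(variance_levels=[0.25, 0.5, 1.0], iterations=1000)
sim.simulate(mode=Decoder.SUM_PROD)  # max-product decoding would use the other mode constant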
Example 11: main
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def main(infile, outfile, fromline=0):
    fin = open(infile, 'r')
    fout = open(outfile, 'w')
    fseek = open(log_path + '/db_progress', 'w')
    fout.write("USE %s;\n" % database)

    # read in file
    i = 0  # line counter
    for l in fin:
        if i < fromline:
            i += 1
            continue  # skip lines before fromline
        if 'OK' in l:
            date = l[0:10]
            time = l[11:19]
            timestamp = "'" + date + ' ' + time + "'"
            packet = l[28:].strip()
            (nid, data) = Decoder.decode(packet)
            if data:
                if nodes.has_key(nid):
                    table = nodes[nid]['db']['table']
                    fieldstring = ', '.join(table['fields'])
                    stringdata = [timestamp]
                    for d in data:
                        stringdata.append("'" + str(d) + "'")
                    valstring = ', '.join(stringdata)
                    fout.write("INSERT INTO %s (%s) VALUES (%s);\n"
                               % (table['name'], fieldstring, valstring))
        i += 1
    fseek.write(str(i))  # write last line to file

    fin.close()
    fout.close()
    fseek.close()
Example 12: Machine
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
class Machine(object):
    def __init__(self, fname):
        self.fname = fname
        self.decoder = Decoder(self)
        self.breakpoints = {}
        self.breakpoint_conditions = {}
        self.prev_input = None
        self.hex_input_mode = False
        self.tracked_registers = set([SP])
        self.reset()

    def reset(self):
        with open(self.fname) as f:
            self.mem = Memory(f)
        self.door_unlocked = False
        self.step_count = 1
        self.break_at_finish = -1
        self.registers = Registers()
        self.registers[PC] = self.mem[0xfffe]
        self.call_targets = [self.registers[PC]]
        self.callsites = []
        self.current_block_start = self.registers[PC]
        self.insn_count = 0

    def debug(self, prog_input=raw_input, debug_input=raw_input,
              prog_output=sys.stdout, debug_output=sys.stdout,
              trace=None):
        self.prog_input = prog_input
        self.debug_input = debug_input
        self.prog_output = prog_output
        self.debug_output = debug_output
        self.trace = trace
        try:
            while self.execute_next():
                pass
        except EOFError:
            self.display('EOF received. Bye!')

    def display(self, v):
        self.debug_output.write(str(v) + '\n')

    def display_state(self):
        lines = disassembler.disassemble(self.current_block_start, self.mem,
                                         self.registers[PC] + 10)
        for line in lines:
            addr, insn = line
            text = "%x: %s" % line
            if addr == self.registers[PC]:
                self.display(colored(text, 'green'))
            else:
                self.display(text)
        self.display('')
        for i, r in enumerate(self.registers):
            self.debug_output.write('%s: %04x' % (Disassembler.pretty_reg(i), r))
            if (i + 1) % 4 == 0:
                self.debug_output.write('\n')
            else:
                self.debug_output.write('\t')
        self.display('')
        for reg in self.tracked_registers:
            self.debug_output.write(colored('%s >> ' % Disassembler.pretty_reg(reg), 'blue'))
            self.display_mem(self.registers[reg])

    def display_mem(self, addr):
        start = max(0, addr - 16)
        end = min(0xffff, addr + 16)
        self.debug_output.write('%x:' % start)
        for i in xrange(start, end):
            if (i - start) % 2 == 0:
                self.debug_output.write(' ')
            text = '%02x' % self.mem.get_byte(i)
            if i == addr:
                text = colored(text, 'red')
            self.debug_output.write(text)
        self.display('')

    def handle_cmds(self):
        while True:
            self.display_state()
            s = self.debug_input('> ')
            if s == '':
                if self.prev_input is not None:
                    s = self.prev_input
            self.prev_input = s
            cmd, sep, rest = s.partition(' ')
            rest = rest.strip()
            if cmd == 'break':
                target, sep, cond = rest.partition(' if ')
                self.breakpoints[int(target, 16)] = -1
                if cond != '':
                    self.breakpoint_conditions[int(target, 16)] = \
                        lambda: eval(cond, globals(), self.__dict__)
            elif cmd == 'unbreak':
                if rest == 'all':
                    self.breakpoints = {}
                else:
                    try:
                        del self.breakpoints[int(rest, 16)]
#......... (remaining code omitted) .........
Example 13: main
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def main():
    file_name = "data/to_decipher.txt"
    crypto = FileReader().readFile(file_name)
    decoder = Decoder(crypto)
    decrypted = decoder.decode()
    return 0
Example 14: decode
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
def decode(self):
    """
    Orchestrates the reading of file shares into encoded symbols
    and decoding of the encoded symbols
    """
    self.stats['start_time'] = time.time()
    self.verify_input_dir()
    self.verify_output_file()

    # Outer loop will iterate over blocks in a directory.
    # Block directories contain shares per block.
    # Blocks start at 0 and increment by 1. If block n doesn't exist
    # then assume that is the end of the file.
    block = 0
    blockdir = os.path.join(self.input_dir, str(block))
    while os.path.exists(blockdir):
        # Attempt to read metadata for this block
        self.start_timer()
        k, padding = self.read_block_meta_data(blockdir)
        self.add_time(self.stop_timer(), 'io_time')

        # For each file in the block directory (excluding meta) read each
        # share. Each will be an encoding symbol.
        decoder = Decoder(k)
        read_symbols = 0
        for _file in os.listdir(blockdir):
            # Skip non-files
            if not os.path.isfile(os.path.join(blockdir, _file)):
                continue
            try:
                # Open the share file in binary mode
                self.start_timer()
                symbol = numpy.fromfile(os.path.join(blockdir, _file),
                                        dtype='uint64')
                self.add_time(self.stop_timer(), 'io_time')
                # Add the symbol to the decoder.
                # A symbol is a (integer, numpy array) tuple
                can_decode = decoder.append((int(_file), symbol))
                read_symbols += 1
                if can_decode:
                    break
            except Exception:
                continue

        # Ideally we want more than k encoded symbols.
        # We will fail with less than k.
        if read_symbols < k:
            self.exit("There were not sufficient symbols"
                      " to recover block %s" % block)
        if not can_decode:
            self.exit("A decoding schedule was not possible "
                      "with the symbols provided.")

        # Instruct decoder to calculate intermediate symbols from
        # known encoding symbols
        self.start_timer()
        decoder.decode()
        self.add_time(self.stop_timer(), 'decoding_time')

        # Stream source symbol output by encoding the first
        # k encoded symbols.
        # The first k source symbols == the first k encoding symbols
        target = open(self.output_file, 'ab')
        for i in xrange(k):
            self.start_timer()
            s = decoder.next()[1]
            self.add_time(self.stop_timer(), 'decoding_time')
            self.start_timer()
            s.tofile(target)
            self.add_time(self.stop_timer(), 'io_time')
        target.close()

        # Padding should only be on the last block but we check anyway
        # @TODO - Ensure file size is accurate before truncating
        if padding:
            self.start_timer()
            size = os.path.getsize(self.output_file) - padding
            target = io.open(self.output_file, 'a+b')
            target.truncate(size)
            target.close()
            self.add_time(self.stop_timer(), 'io_time')

        # Increment block number by 1
        block += 1
        blockdir = os.path.join(self.input_dir, str(block))

    self.stats['blocks_decoded'] = block
    self.stats['end_time'] = time.time()
    self.stats['elapsed_time'] = \
#......... (remaining code omitted) .........
Example 15: TextCleanser
# Required import: from decoder import Decoder [as alias]
# Or: from decoder.Decoder import decode [as alias]
class TextCleanser(object):
    def __init__(self):
        """Constructor"""
        self.generator = Generator()
        self.decoder = Decoder()
        # print "READY"

    def heuristic_cleanse(self, text, gen_off_by_ones=False, ssk=False):
        """Accept noisy text, run it through the cleanser described in Gouws et al. 2011, and
        return the cleansed text.
        If gen_off_by_ones==True, generate spelling variants (1 edit distance away)."""
        gen = self.generator
        if ssk:
            string_sim_func = gen.SSK_SIM
        else:
            string_sim_func = gen.IBM_SIM
        replacements, old_tokens, candidates = gen.sent_generate_candidates(text, string_sim_func,
                                                                            gen_off_by_ones)
        # print candidates
        # word_lattice = gen.generate_word_lattice(candidates)
        word_mesh = gen.generate_word_mesh(candidates)
        cleantext, error = self.decoder.decode(word_mesh)
        if error:
            print "mesh: ", word_mesh
            print cleantext
            print error
            # raw_input("[PRESS ENTER]")
            # exit(2)
        # print "clean: ", cleantext
        replacements = self.get_replacements(cleantext, old_tokens)
        return cleantext, error, replacements

    def phonetic_ED_cleanse(self, text, gen_off_by_ones=True):
        gen = self.generator
        replacements, old_tokens, candidates = gen.sent_generate_candidates(text, gen.PHONETIC_ED_SIM,
                                                                            gen_off_by_ones)
        # print candidates
        # word_lattice = gen.generate_word_lattice(candidates)
        word_mesh = gen.generate_word_mesh(candidates)
        cleantext, error = self.decoder.decode(word_mesh)
        replacements = self.get_replacements(cleantext, old_tokens)
        return cleantext, error, replacements

    def ssk_cleanse(self, text, gen_off_by_ones=False):
        """Use the subsequence overlap similarity function."""
        return self.heuristic_cleanse(text, gen_off_by_ones, ssk=True)

    def log_oovs(self, text):
        """Return a list of all out-of-vocabulary words for pre-processing purposes."""
        raise NotImplementedError("Not yet implemented")

    def get_replacements(self, cleantext, old_tokens):
        """Return the token replacements that were made."""
        new_tokens = self.generator.fix_bad_tokenisation(cleantext.split())
        # if new_tokens contains more tokens than old_tokens then alignment is screwed
        if len(new_tokens) > len(old_tokens):
            replacements = -1
        else:
            replacements = []
            for i, new_tok in enumerate(new_tokens):
                if i >= len(old_tokens):
                    break
                old_tok = old_tokens[i]
                if new_tok != old_tok.lower():
                    replacements.append((old_tok, new_tok))
        return replacements
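A minimal usage sketch, assuming the Generator and Decoder models that TextCleanser depends on are installed and trained; the input sentence is made up and the output depends entirely on those models.

# Hypothetical usage of the cleanser above.
cleanser = TextCleanser()
cleantext, error, replacements = cleanser.heuristic_cleanse("c u 2morrow at skool")
if not error:
    print cleantext       # cleansed sentence
    print replacements    # list of (old_token, new_token) substitutions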