This article collects typical usage examples of the Python class galaxy.datatypes.binary.Binary: what the Binary class is for, how it is used, and what working code that uses it looks like. The curated class examples below should help answer those questions.
15 code examples of the Binary class are presented below, sorted by popularity by default.
Example 1: __init__
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
# Binary model
self.add_composite_file('model.hmm.h3m', is_binary=True)
# SSI index for binary model
self.add_composite_file('model.hmm.h3i', is_binary=True)
# Profiles (MSV part)
self.add_composite_file('model.hmm.h3f', is_binary=True)
# Profiles (remainder)
self.add_composite_file('model.hmm.h3p', is_binary=True)
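For context, the constructor above would normally live inside a Binary subclass that also defines a file_ext and registers the extension with Galaxy. The sketch below shows one plausible wrapper; the class name HmmerPressed and the extension 'hmmpress' are illustrative assumptions, not taken from the example.

from galaxy.datatypes.binary import Binary


class HmmerPressed(Binary):
    """Hypothetical composite datatype for the four files written by HMMER3's hmmpress."""
    file_ext = "hmmpress"
    composite_type = 'basic'

    def __init__(self, **kwd):
        Binary.__init__(self, **kwd)
        self.add_composite_file('model.hmm.h3m', is_binary=True)  # binary model
        self.add_composite_file('model.hmm.h3i', is_binary=True)  # SSI index
        self.add_composite_file('model.hmm.h3f', is_binary=True)  # MSV filter profiles
        self.add_composite_file('model.hmm.h3p', is_binary=True)  # remaining profile data


# A composite binary has no single header to sniff, so the extension is
# registered as an unsniffable binary format (compare Examples 13 and 14).
Binary.register_unsniffable_binary_ext("hmmpress")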
Example 2: __init__
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
self.add_composite_file(
'wiff',
description='AB SCIEX files in .wiff format. This can contain all needed information or only metadata.',
is_binary=True)
self.add_composite_file(
'wiff_scan',
description='AB SCIEX spectra file (wiff.scan), if the corresponding .wiff file only contains metadata.',
        optional=True, is_binary=True)
Example 3: __init__
def __init__(self, **kwd):
"""
A Fastsearch Index consists of a binary file with the fingerprints
and a pointer to the actual molecule file.
"""
Binary.__init__(self, **kwd)
self.add_composite_file('molecule.fs', is_binary=True,
description='OpenBabel Fastsearch Index')
self.add_composite_file('molecule.sdf', optional=True,
is_binary=False, description='Molecule File')
self.add_composite_file('molecule.smi', optional=True,
is_binary=False, description='Molecule File')
self.add_composite_file('molecule.inchi', optional=True,
is_binary=False, description='Molecule File')
self.add_composite_file('molecule.mol2', optional=True,
is_binary=False, description='Molecule File')
self.add_composite_file('molecule.cml', optional=True,
is_binary=False, description='Molecule File')
Example 4: handle_uploaded_dataset_file
def handle_uploaded_dataset_file(filename, datatypes_registry, ext='auto', is_multi_byte=False):
is_valid, ext = handle_compressed_file(filename, datatypes_registry, ext=ext)
if not is_valid:
raise InappropriateDatasetContentError('The compressed uploaded file contains inappropriate content.')
if ext in AUTO_DETECT_EXTENSIONS:
ext = guess_ext(filename, sniff_order=datatypes_registry.sniff_order, is_multi_byte=is_multi_byte)
if check_binary(filename):
if not Binary.is_ext_unsniffable(ext) and not datatypes_registry.get_datatype_by_extension(ext).sniff(filename):
raise InappropriateDatasetContentError('The binary uploaded file contains inappropriate content.')
elif check_html(filename):
raise InappropriateDatasetContentError('The uploaded file contains inappropriate HTML content.')
return ext
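The check_binary() branch above accepts a binary upload when its extension was registered as unsniffable, or when the extension's datatype can sniff the file content. A minimal sketch of the first path, using a hypothetical Foo datatype and 'foo' extension, might look like this:

from galaxy.datatypes.binary import Binary


class Foo(Binary):
    """Hypothetical binary datatype with no reliable magic bytes to sniff."""
    file_ext = "foo"


# After this call, Binary.is_ext_unsniffable('foo') returns True, so
# handle_uploaded_dataset_file() will accept an uploaded .foo file without
# requiring Foo.sniff() to succeed.
Binary.register_unsniffable_binary_ext("foo")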
Example 5: __init__
def __init__( self, **kwd ):
Binary.__init__( self, **kwd )
Example 6: list
"""
Checking if the file is in FCS format. Should read FCS2.0, FCS3.0
and FCS3.1
"""
r.packages.importr("flowCore")
rlib = r.packages.packages
try:
fcsobject = rlib.flowCore.isFCSfile(filename)
return list(fcsobject)[0]
except:
return False
def get_mime(self):
"""Returns the mime type of the datatype"""
return 'application/octet-stream'
Binary.register_sniffable_binary_format("fcs", "fcs", FCS)
class FlowText(Tabular):
"""Class describing an Flow Text file"""
file_ext = "flowtext"
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Text Flow file"
dataset.blurb = data.nice_size(dataset.get_size())
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
    def display_peek(self, dataset):
        try:
            return dataset.peek
        except:
            return "Text Flow file (%s)" % data.nice_size(dataset.get_size())
Example 7: display_peek
else:
dataset.peek = 'file does not exist'
dataset.blurb = 'file purged from disk'
def display_peek( self, dataset ):
try:
return dataset.peek
except:
return "Matlab Binary file (%s)" % ( data.nice_size( dataset.get_size() ) )
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, size=None, offset=None, **kwd):
if preview:
return ("MATLAB data files cannot be previewed.")
else:
return super(Matlab, self).display_data( trans, dataset, preview, filename, to_ext, size, offset, **kwd)
Binary.register_sniffable_binary_format("mat", "mat", Matlab)
class Wav(Binary):
file_ext = "wav"
def __init__( self, **kwd ):
Binary.__init__( self, **kwd )
def sniff( self, filename ):
try:
            header = open(filename, 'rb').read(4)
            if header.startswith(b"RIFF"):
return True
else:
return False
        except:
            return False
Example 8: add_file
def add_file(dataset, registry, json_file, output_path):
data_type = None
line_count = None
converted_path = None
stdout = None
link_data_only = dataset.get('link_data_only', 'copy_files')
run_as_real_user = in_place = dataset.get('in_place', True)
purge_source = dataset.get('purge_source', True)
# in_place is True if there is no external chmod in place,
# however there are other instances where modifications should not occur in_place:
# when a file is added from a directory on the local file system (ftp import folder or any other path).
if dataset.type in ('server_dir', 'path_paste', 'ftp_import'):
in_place = False
    check_content = dataset.get('check_content', True)
auto_decompress = dataset.get('auto_decompress', True)
try:
ext = dataset.file_type
except AttributeError:
file_err('Unable to process uploaded file, missing file_type parameter.', dataset, json_file)
return
if dataset.type == 'url':
try:
page = urlopen(dataset.path) # page will be .close()ed by sniff methods
temp_name, dataset.is_multi_byte = sniff.stream_to_file(page, prefix='url_paste', source_encoding=util.get_charset_from_http_headers(page.headers))
except Exception as e:
file_err('Unable to fetch %s\n%s' % (dataset.path, str(e)), dataset, json_file)
return
dataset.path = temp_name
# See if we have an empty file
if not os.path.exists(dataset.path):
file_err('Uploaded temporary file (%s) does not exist.' % dataset.path, dataset, json_file)
return
if not os.path.getsize(dataset.path) > 0:
file_err('The uploaded file is empty', dataset, json_file)
return
if not dataset.type == 'url':
# Already set is_multi_byte above if type == 'url'
try:
dataset.is_multi_byte = multi_byte.is_multi_byte(codecs.open(dataset.path, 'r', 'utf-8').read(100))
except UnicodeDecodeError as e:
dataset.is_multi_byte = False
# Is dataset an image?
i_ext = get_image_ext(dataset.path)
if i_ext:
ext = i_ext
data_type = ext
# Is dataset content multi-byte?
elif dataset.is_multi_byte:
data_type = 'multi-byte char'
ext = sniff.guess_ext(dataset.path, registry.sniff_order, is_multi_byte=True)
# Is dataset content supported sniffable binary?
else:
# FIXME: This ignores the declared sniff order in datatype_conf.xml
# resulting in improper behavior
type_info = Binary.is_sniffable_binary(dataset.path)
if type_info:
data_type = type_info[0]
ext = type_info[1]
if not data_type:
root_datatype = registry.get_datatype_by_extension(dataset.file_type)
if getattr(root_datatype, 'compressed', False):
data_type = 'compressed archive'
ext = dataset.file_type
else:
# See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress
is_gzipped, is_valid = check_gzip(dataset.path, check_content=check_content)
if is_gzipped and not is_valid:
file_err('The gzipped uploaded file contains inappropriate content', dataset, json_file)
return
elif is_gzipped and is_valid and auto_decompress:
if link_data_only == 'copy_files':
# We need to uncompress the temp_name file, but BAM files must remain compressed in the BGZF format
CHUNK_SIZE = 2 ** 20 # 1Mb
fd, uncompressed = tempfile.mkstemp(prefix='data_id_%s_upload_gunzip_' % dataset.dataset_id, dir=os.path.dirname(output_path), text=False)
gzipped_file = gzip.GzipFile(dataset.path, 'rb')
while 1:
try:
chunk = gzipped_file.read(CHUNK_SIZE)
except IOError:
os.close(fd)
os.remove(uncompressed)
file_err('Problem decompressing gzipped data', dataset, json_file)
return
if not chunk:
break
os.write(fd, chunk)
os.close(fd)
gzipped_file.close()
# Replace the gzipped file with the decompressed file if it's safe to do so
if not in_place:
dataset.path = uncompressed
else:
shutil.move(uncompressed, dataset.path)
os.chmod(dataset.path, 0o644)
dataset.name = dataset.name.rstrip('.gz')
data_type = 'gzip'
if not data_type:
# See if we have a bz2 file, much like gzip
is_bzipped, is_valid = check_bz2(dataset.path, check_content)
#.........the rest of the code in this function is omitted here.........
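For reference, check_gzip() above comes from galaxy.datatypes.checkers and returns an (is_gzipped, is_valid) pair. A rough, simplified stand-in is sketched below; the real helper also vets the decompressed content when check_content is set, which this sketch does not attempt.

import gzip


def looks_gzipped(path, check_content=True):
    """Simplified stand-in for check_gzip(): gzip magic number plus a quick
    integrity read of the compressed stream."""
    with open(path, 'rb') as handle:
        is_gzipped = handle.read(2) == b'\x1f\x8b'
    if not is_gzipped:
        return False, True
    if not check_content:
        return True, True
    try:
        with gzip.open(path, 'rb') as handle:
            handle.read(1024)  # raises if the stream is truncated or corrupt
        return True, True
    except (OSError, IOError):
        return True, False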
Example 9: __init__
def __init__( self, **kwd ):
Binary.__init__( self, **kwd )
log.info('Creating cummeRbund CuffDataDB')
Example 10: PepXmlReport
rval = ['<html><head><title>Wiff Composite Dataset </title></head><p/>']
rval.append('<div>This composite dataset is composed of the following files:<p/><ul>')
for composite_name, composite_file in self.get_composite_files(dataset=dataset).items():
fn = composite_name
opt_text = ''
if composite_file.optional:
opt_text = ' (optional)'
if composite_file.get('description'):
rval.append('<li><a href="%s" type="text/plain">%s (%s)</a>%s</li>' % (fn, fn, composite_file.get('description'), opt_text))
else:
rval.append('<li><a href="%s" type="text/plain">%s</a>%s</li>' % (fn, fn, opt_text))
rval.append('</ul></div></html>')
return "\n".join(rval)
Binary.register_sniffable_binary_format("wiff", "wiff", Wiff)
class PepXmlReport(Tabular):
"""pepxml converted to tabular report"""
edam_data = "data_2536"
file_ext = "pepxml.tsv"
def __init__(self, **kwd):
super(PepXmlReport, self).__init__(**kwd)
        self.column_names = ['Protein', 'Peptide', 'Assumed Charge', 'Neutral Pep Mass (calculated)', 'Neutral Mass', 'Retention Time', 'Start Scan', 'End Scan', 'Search Engine', 'PeptideProphet Probability', 'Interprophet Probability']
def display_peek(self, dataset):
"""Returns formated html of peek"""
return self.make_html_table(dataset, column_names=self.column_names)
Example 11: check_image
image = check_image( dataset.path )
if image:
if not PIL:
image = None
    # get_image_ext() returns None if not a supported image type
ext = get_image_ext( dataset.path, image )
data_type = ext
# Is dataset content multi-byte?
elif dataset.is_multi_byte:
data_type = 'multi-byte char'
ext = sniff.guess_ext( dataset.path, is_multi_byte=True )
# Is dataset content supported sniffable binary?
else:
# FIXME: This ignores the declared sniff order in datatype_conf.xml
# resulting in improper behavior
type_info = Binary.is_sniffable_binary( dataset.path )
if type_info:
data_type = type_info[0]
ext = type_info[1]
if not data_type:
root_datatype = registry.get_datatype_by_extension( dataset.file_type )
if getattr( root_datatype, 'compressed', False ):
data_type = 'compressed archive'
ext = dataset.file_type
else:
# See if we have a gzipped file, which, if it passes our restrictions, we'll uncompress
is_gzipped, is_valid = check_gzip( dataset.path )
if is_gzipped and not is_valid:
file_err( 'The gzipped uploaded file contains inappropriate content', dataset, json_file )
return
elif is_gzipped and is_valid:
Example 12: sniff
def sniff(self, filename):
""" The first 8 bytes of any NCBI sra file is 'NCBI.sra', and the file is binary.
For details about the format, see http://www.ncbi.nlm.nih.gov/books/n/helpsra/SRA_Overview_BK/#SRA_Overview_BK.4_SRA_Data_Structure
"""
try:
            header = open(filename, 'rb').read(8)
            if header == b'NCBI.sra':
return True
else:
return False
except:
return False
def set_peek(self, dataset, is_multi_byte=False):
if not dataset.dataset.purged:
dataset.peek = "Binary sra file"
dataset.blurb = nice_size(dataset.get_size())
else:
dataset.peek = "file does not exist"
dataset.blurb = "file purged from disk"
def display_peek(self, dataset):
try:
return dataset.peek
except:
return "Binary sra file (%s)" % (nice_size(dataset.get_size()))
Binary.register_sniffable_binary_format("sra", "sra", Sra)
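A quick, hypothetical smoke test for the sniffer (not part of the original example), assuming the Sra class above is in scope: write the eight magic bytes to a temporary file and confirm that sniff() recognises it.

import tempfile

with tempfile.NamedTemporaryFile(suffix='.sra', delete=False) as handle:
    handle.write(b'NCBI.sra' + b'\x00' * 8)  # magic bytes plus a little padding
print(Sra().sniff(handle.name))  # expected output: True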
Example 13: Count
"""
k-mer count and presence
"""
from galaxy.datatypes.binary import Binary
import logging
log = logging.getLogger(__name__)
class Count(Binary):
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
class Presence(Binary):
def __init__(self, **kwd):
Binary.__init__(self, **kwd)
Binary.register_unsniffable_binary_ext("ct")
Binary.register_unsniffable_binary_ext("pt")
Example 14: Sf3
from galaxy.datatypes.binary import Binary
class Sf3(Binary):
"""Class describing a Scaffold SF3 files"""
file_ext = "sf3"
Binary.register_unsniffable_binary_ext('sf3')
Example 15: format
"""
CEL datatype sniffer for Command Console version 1 format (binary files).
http://media.affymetrix.com/support/developer/powertools/changelog/gcos-agcc/cel.html#calvin
http://media.affymetrix.com/support/developer/powertools/changelog/gcos-agcc/generic.html
"""
import data
from galaxy.datatypes.binary import Binary
class CelCc1( Binary ):
file_ext = "celcc1"
def sniff(self, filename):
# Determine if the file is in CEL Command Console version 1 format.
# Filename is in the format 'upload_file_data_jqRiCG', therefore we must check the header bytes.
# Get the first 2 'UBYTE' (8bit unsigned). First is magic number 59, second is version number (always 1).
with open(filename, "rb") as f:
byte = f.read(2)
try:
if byte[0:2] == b'\x3B\x01':
return True
else:
return False
except IndexError:
return False
Binary.register_sniffable_binary_format("celcc1", "celcc1", CelCc1)
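The header test above can also be written with struct, which makes the two unsigned bytes from the comment (magic number 59, version 1) explicit. The helper below is a hypothetical equivalent for illustration, not part of the example.

import struct
import sys


def is_celcc1(path):
    """Equivalent of the header check in CelCc1.sniff() above."""
    with open(path, 'rb') as handle:
        head = handle.read(2)
    if len(head) < 2:
        return False
    magic, version = struct.unpack('BB', head)
    return magic == 59 and version == 1  # the same bytes as b'\x3B\x01'


if __name__ == '__main__':
    print(is_celcc1(sys.argv[1]))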