This article collects typical usage examples of the Python attribute galaxy.util.bunch.Bunch.metadata. If you have been wondering what Bunch.metadata does, how to use it, or what real code that sets it looks like, the hand-picked examples below should help. You can also look further into usage examples of the containing class, galaxy.util.bunch.Bunch.
Four code examples involving Bunch.metadata are shown below, sorted by popularity by default.
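Before the examples, a quick orientation: Bunch (from galaxy.util.bunch) is a simple attribute-access container, so metadata is just an attribute that callers attach to it, either as another Bunch or as a plain dict. The short sketch below is illustrative only and is not taken from the examples that follow.

from galaxy.util.bunch import Bunch

# A Bunch lets arbitrary attributes be set freely, which is why test code and
# upload handling use it as a lightweight stand-in for dataset objects.
dataset = Bunch()
dataset.id = 1
dataset.metadata = Bunch()                 # metadata as a nested Bunch (Example 1 style)
dataset.metadata.bam_index = '/tmp/x.bai'

composite = Bunch()
composite.metadata = {}                    # metadata as a plain dict (Examples 2-4 style)
composite.metadata['base_name'] = 'sample1'

print(dataset.metadata.bam_index)
print(composite.metadata['base_name'])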
Example 1: get_dataset
# Required import: from galaxy.util.bunch import Bunch [may be imported under an alias]
# Or: from galaxy.util.bunch.Bunch import metadata [may be imported under an alias]
def get_dataset(filename, index_attr='bam_index', dataset_id=1, has_data=True):
    # Build a minimal Bunch-based stand-in for a dataset object.
    dataset = Bunch()
    dataset.has_data = lambda: True
    dataset.id = dataset_id
    dataset.metadata = Bunch()
    with get_input_files(filename) as input_files, get_tmp_path() as index_path:
        dataset.file_name = input_files[0]
        # Attach an index file to the metadata under the requested attribute name.
        index = Bunch()
        index.file_name = index_path
        setattr(dataset.metadata, index_attr, index)
        yield dataset
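Since get_dataset yields a single Bunch, a caller would typically consume it as a generator or wrap it with contextlib.contextmanager and use it in a with block. The usage below is a sketch under that assumption (the decorator is not shown in Example 1, and get_input_files / get_tmp_path are helpers from the surrounding test module):

import contextlib

# Assumed wrapper: Example 1 only shows the generator body, so treating it as a
# context manager here is an illustration, not part of the original code.
managed_dataset = contextlib.contextmanager(get_dataset)

with managed_dataset('1.bam', index_attr='bam_index', dataset_id=42) as dataset:
    print(dataset.file_name)                      # path of the first input file
    print(dataset.metadata.bam_index.file_name)   # temporary path reserved for the index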
Example 2: get_uploaded_datasets
# Required import: from galaxy.util.bunch import Bunch [may be imported under an alias]
# Or: from galaxy.util.bunch.Bunch import metadata [may be imported under an alias]
# ......... part of this method's code is omitted here .........
        else:
            user_ftp_dir = trans.user_ftp_dir
            for ( dirpath, dirnames, filenames ) in os.walk( user_ftp_dir ):
                for filename in filenames:
                    path = relpath( os.path.join( dirpath, filename ), user_ftp_dir )
                    if not os.path.islink( os.path.join( dirpath, filename ) ):
                        # Normalize filesystem paths
                        if isinstance(path, unicode):
                            valid_files.append(unicodedata.normalize('NFC', path ))
                        else:
                            valid_files.append(path)
    else:
        ftp_files = []
    for ftp_file in ftp_files:
        if ftp_file not in valid_files:
            log.warning( 'User passed an invalid file path in ftp_files: %s' % ftp_file )
            continue
            # TODO: warning to the user (could happen if file is already imported)
        ftp_data_file = { 'local_filename' : os.path.abspath( os.path.join( user_ftp_dir, ftp_file ) ),
                          'filename' : os.path.basename( ftp_file ) }
        file_bunch = get_data_file_filename( ftp_data_file, override_name=name, override_info=info )
        if file_bunch.path:
            file_bunch.to_posix_lines = to_posix_lines
            file_bunch.space_to_tab = space_to_tab
            rval.append( file_bunch )
    return rval
file_type = self.get_file_type( context )
d_type = self.get_datatype( trans, context )
dbkey = context.get( 'dbkey', None )
writable_files = d_type.writable_files
writable_files_offset = 0
groups_incoming = [ None for _ in writable_files ]
for group_incoming in context.get( self.name, [] ):
    i = int( group_incoming['__index__'] )
    groups_incoming[ i ] = group_incoming
if d_type.composite_type is not None:
    # handle uploading of composite datatypes
    # Only one Dataset can be created
    dataset = Bunch()
    dataset.type = 'composite'
    dataset.file_type = file_type
    dataset.dbkey = dbkey
    dataset.datatype = d_type
    dataset.warnings = []
    dataset.metadata = {}
    dataset.composite_files = {}
    dataset.uuid = None
    # load metadata
    files_metadata = context.get( self.metadata_ref, {} )
    metadata_name_substition_default_dict = dict( [ ( composite_file.substitute_name_with_metadata, d_type.metadata_spec[ composite_file.substitute_name_with_metadata ].default ) for composite_file in d_type.composite_files.values() if composite_file.substitute_name_with_metadata ] )
    for meta_name, meta_spec in d_type.metadata_spec.iteritems():
        if meta_spec.set_in_upload:
            if meta_name in files_metadata:
                meta_value = files_metadata[ meta_name ]
                if meta_name in metadata_name_substition_default_dict:
                    meta_value = sanitize_for_filename( meta_value, default=metadata_name_substition_default_dict[ meta_name ] )
                dataset.metadata[ meta_name ] = meta_value
    dataset.precreated_name = dataset.name = self.get_composite_dataset_name( context )
    if dataset.datatype.composite_type == 'auto_primary_file':
        # replace sniff here with just creating an empty file
        temp_name, is_multi_byte = sniff.stream_to_file( StringIO.StringIO( d_type.generate_primary_file( dataset ) ), prefix='upload_auto_primary_file' )
        dataset.primary_file = temp_name
        dataset.to_posix_lines = True
        dataset.space_to_tab = False
    else:
        file_bunch, warnings = get_one_filename( groups_incoming[ 0 ] )
        writable_files_offset = 1
        dataset.primary_file = file_bunch.path
        dataset.to_posix_lines = file_bunch.to_posix_lines
        dataset.space_to_tab = file_bunch.space_to_tab
        dataset.warnings.extend( warnings )
    if dataset.primary_file is None:  # remove this before finish, this should create an empty dataset
        raise Exception( 'No primary dataset file was available for composite upload' )
    keys = [ value.name for value in writable_files.values() ]
    for i, group_incoming in enumerate( groups_incoming[ writable_files_offset : ] ):
        key = keys[ i + writable_files_offset ]
        if group_incoming is None and not writable_files[ writable_files.keys()[ keys.index( key ) ] ].optional:
            dataset.warnings.append( "A required composite file (%s) was not specified." % ( key ) )
            dataset.composite_files[ key ] = None
        else:
            file_bunch, warnings = get_one_filename( group_incoming )
            dataset.warnings.extend( warnings )
            if file_bunch.path:
                dataset.composite_files[ key ] = file_bunch.__dict__
            else:
                dataset.composite_files[ key ] = None
                if not writable_files[ writable_files.keys()[ keys.index( key ) ] ].optional:
                    dataset.warnings.append( "A required composite file (%s) was not specified." % ( key ) )
    return [ dataset ]
else:
    datasets = get_filenames( context[ self.name ][0] )
    rval = []
    for dataset in datasets:
        dataset.file_type = file_type
        dataset.datatype = d_type
        dataset.ext = self.get_datatype_ext( trans, context )
        dataset.dbkey = dbkey
        rval.append( dataset )
    return rval
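The part of Example 2 that actually touches Bunch.metadata is the metadata-loading loop: values posted with the upload form (files_metadata) are copied into dataset.metadata, and any value that also substitutes a composite file name is first passed through sanitize_for_filename. The stand-alone sketch below replays that loop with stubbed metadata specs and a simplified sanitizer; FakeSpec and simple_sanitize are assumptions for illustration, not Galaxy APIs.

import re

from galaxy.util.bunch import Bunch


class FakeSpec(object):
    # Hypothetical stand-in for an entry of d_type.metadata_spec.
    def __init__(self, set_in_upload=True):
        self.set_in_upload = set_in_upload


def simple_sanitize(value, default):
    # Simplified substitute for the real sanitize_for_filename helper.
    cleaned = re.sub(r'[^A-Za-z0-9_.-]', '_', value or '')
    return cleaned or default


metadata_spec = {'base_name': FakeSpec(), 'dbkey_hint': FakeSpec(set_in_upload=False)}
files_metadata = {'base_name': 'my sample/1', 'dbkey_hint': 'hg38'}
substitution_defaults = {'base_name': 'Uploaded'}   # mirrors metadata_name_substition_default_dict

dataset = Bunch(metadata={})
for meta_name, meta_spec in metadata_spec.items():
    if meta_spec.set_in_upload and meta_name in files_metadata:
        meta_value = files_metadata[meta_name]
        if meta_name in substitution_defaults:
            meta_value = simple_sanitize(meta_value, default=substitution_defaults[meta_name])
        dataset.metadata[meta_name] = meta_value

print(dataset.metadata)   # {'base_name': 'my_sample_1'}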
Example 3: get_uploaded_datasets
# Required import: from galaxy.util.bunch import Bunch [may be imported under an alias]
# Or: from galaxy.util.bunch.Bunch import metadata [may be imported under an alias]
# ......... part of this method's code is omitted here .........
        space_to_tab = True
    file_bunch = get_data_file_filename( data_file, override_name = name, override_info = info )
    if file_bunch.path and url_paste:
        if url_paste.strip():
            warnings.append( "All file contents specified in the paste box were ignored." )
    else: #we need to use url_paste
        for file_bunch in get_url_paste_urls_or_filename( context, override_name = name, override_info = info ):
            if file_bunch.path:
                break
    return file_bunch, warnings
def get_filenames( context ):
    rval = []
    data_file = context['file_data']
    url_paste = context['url_paste']
    name = context.get( 'NAME', None )
    info = context.get( 'INFO', None )
    space_to_tab = False
    if context.get( 'space_to_tab', None ) not in ["None", None]:
        space_to_tab = True
    warnings = []
    file_bunch = get_data_file_filename( data_file, override_name = name, override_info = info )
    if file_bunch.path:
        file_bunch.space_to_tab = space_to_tab
        rval.append( file_bunch )
    for file_bunch in get_url_paste_urls_or_filename( context, override_name = name, override_info = info ):
        if file_bunch.path:
            file_bunch.space_to_tab = space_to_tab
            rval.append( file_bunch )
    return rval
file_type = self.get_file_type( context )
d_type = self.get_datatype( trans, context )
dbkey = context.get( 'dbkey', None )
writable_files = d_type.writable_files
writable_files_offset = 0
groups_incoming = [ None for filename in writable_files ]
for group_incoming in context.get( self.name, [] ):
    i = int( group_incoming['__index__'] )
    groups_incoming[ i ] = group_incoming
if d_type.composite_type is not None:
    #handle uploading of composite datatypes
    #Only one Dataset can be created
    dataset = Bunch()
    dataset.type = 'composite'
    dataset.file_type = file_type
    dataset.dbkey = dbkey
    dataset.datatype = d_type
    dataset.warnings = []
    dataset.metadata = {}
    dataset.composite_files = {}
    #load metadata
    files_metadata = context.get( self.metadata_ref, {} )
    for meta_name, meta_spec in d_type.metadata_spec.iteritems():
        if meta_spec.set_in_upload:
            if meta_name in files_metadata:
                dataset.metadata[ meta_name ] = files_metadata[ meta_name ]
    dataset_name = None
    dataset_info = None
    if dataset.datatype.composite_type == 'auto_primary_file':
        #replace sniff here with just creating an empty file
        temp_name, is_multi_byte = sniff.stream_to_file( StringIO.StringIO( d_type.generate_primary_file() ), prefix='upload_auto_primary_file' )
        dataset.primary_file = temp_name
        dataset.space_to_tab = False
        dataset.precreated_name = dataset.name = 'Uploaded Composite Dataset (%s)' % ( file_type )
    else:
        file_bunch, warnings = get_one_filename( groups_incoming[ 0 ] )
        if dataset.datatype.composite_type:
            precreated_name = 'Uploaded Composite Dataset (%s)' % ( file_type )
        writable_files_offset = 1
        dataset.primary_file = file_bunch.path
        dataset.space_to_tab = file_bunch.space_to_tab
        dataset.precreated_name = file_bunch.precreated_name
        dataset.name = file_bunch.precreated_name
        dataset.warnings.extend( file_bunch.warnings )
    if dataset.primary_file is None: #remove this before finish, this should create an empty dataset
        raise Exception( 'No primary dataset file was available for composite upload' )
    keys = [ value.name for value in writable_files.values() ]
    for i, group_incoming in enumerate( groups_incoming[ writable_files_offset : ] ):
        key = keys[ i + writable_files_offset ]
        if group_incoming is None and not writable_files[ writable_files.keys()[ keys.index( key ) ] ].optional:
            dataset.warnings.append( "A required composite file (%s) was not specified." % ( key ) )
            dataset.composite_files[ key ] = None
        else:
            file_bunch, warnings = get_one_filename( group_incoming )
            if file_bunch.path:
                dataset.composite_files[ key ] = file_bunch.__dict__
            else:
                dataset.composite_files[ key ] = None
                if not writable_files[ writable_files.keys()[ keys.index( key ) ] ].optional:
                    dataset.warnings.append( "A required composite file (%s) was not specified." % ( key ) )
    return [ dataset ]
else:
    datasets = get_filenames( context[ self.name ][0] )
    rval = []
    for dataset in datasets:
        dataset.file_type = file_type
        dataset.datatype = d_type
        dataset.ext = self.get_datatype_ext( trans, context )
        dataset.dbkey = dbkey
        rval.append( dataset )
    return rval
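Both Example 2 and Example 3 return either a single composite Bunch (with metadata as a dict plus composite_files and warnings) or a list of per-file Bunches. A small caller-side helper like the one below can make that shape explicit; it is a hypothetical sketch, and the datasets argument stands for whatever get_uploaded_datasets returned.

def describe_uploaded(datasets):
    # Hypothetical helper: summarize the Bunch objects returned by get_uploaded_datasets.
    for ds in datasets:
        if getattr(ds, 'type', None) == 'composite':
            # One Bunch describing the whole composite dataset.
            print('%s %s %s' % (ds.name, sorted(ds.metadata), sorted(ds.composite_files)))
            for warning in ds.warnings:
                print('warning: %s' % warning)
        else:
            # One Bunch per regular uploaded file.
            print('%s %s %s' % (ds.file_type, ds.dbkey, getattr(ds, 'ext', None)))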
Example 4: get_uploaded_datasets
# Required import: from galaxy.util.bunch import Bunch [may be imported under an alias]
# Or: from galaxy.util.bunch.Bunch import metadata [may be imported under an alias]
# ......... part of this method's code is omitted here .........
        purge = getattr(trans.app.config, 'ftp_upload_purge', True)
        file_bunch = get_data_file_filename(ftp_data_file, override_name=name, override_info=info, purge=purge)
        if file_bunch.path:
            file_bunch.to_posix_lines = to_posix_lines
            file_bunch.auto_decompress = auto_decompress
            file_bunch.space_to_tab = space_to_tab
            if file_type is not None:
                file_bunch.file_type = file_type
            if dbkey is not None:
                file_bunch.dbkey = dbkey
            rval.append(file_bunch)
    return rval
file_type = self.get_file_type(context)
file_count = self.get_file_count(trans, context)
d_type = self.get_datatype(trans, context)
dbkey = self.get_dbkey(context)
tag_using_filenames = context.get('tag_using_filenames', False)
force_composite = asbool(context.get('force_composite', 'False'))
writable_files = d_type.writable_files
writable_files_offset = 0
groups_incoming = [None for _ in range(file_count)]
for group_incoming in context.get(self.name, []):
    i = int(group_incoming['__index__'])
    groups_incoming[i] = group_incoming
if d_type.composite_type is not None or force_composite:
    # handle uploading of composite datatypes
    # Only one Dataset can be created
    dataset = Bunch()
    dataset.type = 'composite'
    dataset.file_type = file_type
    dataset.dbkey = dbkey
    dataset.datatype = d_type
    dataset.warnings = []
    dataset.metadata = {}
    dataset.composite_files = {}
    dataset.uuid = None
    dataset.tag_using_filenames = None
    # load metadata
    files_metadata = context.get(self.metadata_ref, {})
    metadata_name_substition_default_dict = dict((composite_file.substitute_name_with_metadata, d_type.metadata_spec[composite_file.substitute_name_with_metadata].default) for composite_file in d_type.composite_files.values() if composite_file.substitute_name_with_metadata)
    for meta_name, meta_spec in d_type.metadata_spec.items():
        if meta_spec.set_in_upload:
            if meta_name in files_metadata:
                meta_value = files_metadata[meta_name]
                if meta_name in metadata_name_substition_default_dict:
                    meta_value = sanitize_for_filename(meta_value, default=metadata_name_substition_default_dict[meta_name])
                dataset.metadata[meta_name] = meta_value
    dataset.name = self.get_composite_dataset_name(context)
    if dataset.datatype.composite_type == 'auto_primary_file':
        # replace sniff here with just creating an empty file
        temp_name = sniff.stream_to_file(StringIO(d_type.generate_primary_file(dataset)), prefix='upload_auto_primary_file')
        dataset.primary_file = temp_name
        dataset.to_posix_lines = True
        dataset.auto_decompress = True
        dataset.space_to_tab = False
    else:
        file_bunch, warnings = get_one_filename(groups_incoming[0])
        writable_files_offset = 1
        dataset.primary_file = file_bunch.path
        dataset.to_posix_lines = file_bunch.to_posix_lines
        dataset.auto_decompress = file_bunch.auto_decompress
        dataset.space_to_tab = file_bunch.space_to_tab
        if file_bunch.file_type:
            dataset.file_type = file_type
        if file_bunch.dbkey:
            dataset.dbkey = dbkey