

Python bunch.Bunch Class Code Examples

This article collects typical usage examples of the Bunch class from galaxy.util.bunch in Python. If you are wondering how the Bunch class works, how to call it, or what real code that uses it looks like, the curated examples below should help.


Fifteen code examples of the Bunch class are shown below, sorted by popularity by default.
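
Before the project snippets, here is a minimal, self-contained sketch of the pattern they all rely on. It is based only on the behavior visible in the examples below (keyword arguments become attributes, attributes can be assigned after construction, and membership tests work); the variable names are illustrative, not taken from Galaxy itself.

from galaxy.util.bunch import Bunch

# Keyword arguments passed to the constructor become attributes.
fill_options = Bunch(fill_unjoined_only=True, file1_columns=None)

# Attribute-style access and later assignment, as in the examples below.
print(fill_options.fill_unjoined_only)  # True
fill_options.file2_columns = None

# Membership tests check whether an attribute has been set.
if 'file2_columns' in fill_options:
    print('file2_columns is set')

Examples 1, 6, and 8 use the same pattern to build lightweight stand-ins for Galaxy's application and configuration objects in unit tests.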

Example 1: __init__

    def __init__(self, root=None, **kwargs):
        Bunch.__init__(self, **kwargs)
        root = root or '/tmp'
        self.security = security.SecurityHelper(id_secret='bler')
        self.use_remote_user = kwargs.get('use_remote_user', False)
        self.file_path = '/tmp'
        self.jobs_directory = '/tmp'
        self.new_file_path = '/tmp'
        self.tool_data_path = '/tmp'

        self.object_store_config_file = ''
        self.object_store = 'disk'
        self.object_store_check_old_style = False

        self.user_activation_on = False
        self.new_user_dataset_access_role_default_private = False

        self.expose_dataset_path = True
        self.allow_user_dataset_purge = True
        self.enable_old_display_applications = True

        self.umask = 0o77

        # Following two are required by GenomeBuilds
        self.len_file_path = os.path.join('tool-data', 'shared', 'ucsc', 'chrom')
        self.builds_file_path = os.path.join('tool-data', 'shared', 'ucsc', 'builds.txt.sample')

        self.migrated_tools_config = "/tmp/migrated_tools_conf.xml"
        self.preserve_python_environment = "always"

        # set by MockDir
        self.root = root
Developer: bwlang, Project: galaxy, Lines: 32, Source: galaxy_mock.py

Example 2: mock_trans

def mock_trans(has_user=True, is_admin=False):
    trans = Bunch(user_is_admin=lambda: is_admin)
    if has_user:
        trans.user = Bunch(preferences={})
    else:
        trans.user = None
    return trans
Developer: ImmPortDB, Project: immport-galaxy, Lines: 7, Source: test_toolbox_filters.py

Example 3: mock_trans

def mock_trans( has_user=True ):
    trans = Bunch( )
    if has_user:
        trans.user = Bunch(preferences={})
    else:
        trans.user = None
    return trans
Developer: ValentinaPeona, Project: galaxy, Lines: 7, Source: test_toolbox_filters.py

Example 4: __init__

 def __init__( self, **kwd ):
     Bunch.__init__( self, **kwd )
     self.primary_file = None
     self.composite_files = odict()
     self.dbkey = None
     self.warnings = []
     
     self._temp_filenames = []  # store all created filenames here, delete on cleanup
Developer: dbcls, Project: dbcls-galaxy, Lines: 8, Source: grouping.py

Example 5: filter_factory

def filter_factory(config_dict=None):
    if config_dict is None:
        config_dict = dict(
            tool_filters=["filtermod:filter_tool"],
            tool_section_filters=["filtermod:filter_section"],
            tool_label_filters=["filtermod:filter_label_1", "filtermod:filter_label_2"],
        )
    config = Bunch(**config_dict)
    config.toolbox_filter_base_modules = "galaxy.tools.filters,unit.tools.filter_modules"
    app = Bunch(config=config)
    toolbox = Bunch(app=app)
    return FilterFactory(toolbox)
Developer: ImmPortDB, Project: immport-galaxy, Lines: 12, Source: test_toolbox_filters.py

Example 6: __init__

    def __init__( self, **kwargs ):
        Bunch.__init__( self, **kwargs )
        self.security = security.SecurityHelper( id_secret='bler' )
        self.file_path = '/tmp'
        self.job_working_directory = '/tmp'
        self.new_file_path = '/tmp'

        self.object_store_config_file = ''
        self.object_store = 'disk'
        self.object_store_check_old_style = False

        self.user_activation_on = False
        self.new_user_dataset_access_role_default_private = False

        self.expose_dataset_path = True
        self.allow_user_dataset_purge = True
        self.enable_old_display_applications = True
Developer: BinglanLi, Project: galaxy, Lines: 17, Source: mock.py

Example 7: get_dataset

def get_dataset(filename, index_attr='bam_index', dataset_id=1, has_data=True):
    dataset = Bunch()
    dataset.has_data = lambda: True
    dataset.id = dataset_id
    dataset.metadata = Bunch()
    with get_input_files(filename) as input_files, get_tmp_path() as index_path:
        dataset.file_name = input_files[0]
        index = Bunch()
        index.file_name = index_path
        setattr(dataset.metadata, index_attr, index)
        yield dataset
Developer: ImmPortDB, Project: immport-galaxy, Lines: 11, Source: util.py

Example 8: __init__

    def __init__( self, root=None, **kwargs ):
        Bunch.__init__( self, **kwargs )
        self.security = security.SecurityHelper( id_secret='bler' )
        self.use_remote_user = kwargs.get( 'use_remote_user', False )
        self.file_path = '/tmp'
        self.jobs_directory = '/tmp'
        self.new_file_path = '/tmp'

        self.object_store_config_file = ''
        self.object_store = 'disk'
        self.object_store_check_old_style = False

        self.user_activation_on = False
        self.new_user_dataset_access_role_default_private = False

        self.expose_dataset_path = True
        self.allow_user_dataset_purge = True
        self.enable_old_display_applications = True

        self.umask = 0o77

        # set by MockDir
        self.root = root
Developer: AAFC-MBB, Project: galaxy-1, Lines: 23, Source: galaxy_mock.py

Example 9: init

def init( file_path, url, engine_options={}, create_tables=False ):
    """Connect mappings to the database"""
    # Load the appropriate db module
    load_egg_for_url( url )
    # Create the database engine
    engine = create_engine( url, **engine_options )
    # Connect the metadata to the database.
    metadata.bind = engine
    # Clear any existing contextual sessions and reconfigure
    Session.remove()
    Session.configure( bind=engine )
    # Create tables if needed
    if create_tables:
        metadata.create_all()
    # Pack everything into a bunch
    result = Bunch( **globals() )
    result.engine = engine
    result.session = Session
    result.create_tables = create_tables
    # Load local tool shed security policy
    result.security_agent = CommunityRBACAgent( result )
    result.shed_counter = shed_statistics.ShedCounter( result )
    result.hgweb_config_manager = galaxy.webapps.tool_shed.util.hgweb_config.HgWebConfigManager()
    return result
Developer: knowingchaos, Project: galaxy, Lines: 24, Source: mapping.py

Example 10: Bunch

        default=None,
        help="Fill empty columns with a values from a JSONified file.",
    )

    options, args = parser.parse_args()

    fill_options = None
    if options.fill_options_file is not None:
        try:
            fill_options = Bunch(
                **stringify_dictionary_keys(json.load(open(options.fill_options_file)))
            )  # json.load( open( options.fill_options_file ) )
        except Exception, e:
            print "Warning: Ignoring fill options due to json error (%s)." % e
    if fill_options is None:
        fill_options = Bunch()
    if "fill_unjoined_only" not in fill_options:
        fill_options.fill_unjoined_only = True
    if "file1_columns" not in fill_options:
        fill_options.file1_columns = None
    if "file2_columns" not in fill_options:
        fill_options.file2_columns = None

    try:
        filename1 = args[0]
        filename2 = args[1]
        column1 = int(args[2]) - 1
        column2 = int(args[3]) - 1
        out_filename = args[4]
    except:
        print >> sys.stderr, "Error parsing command line."
Developer: XikunHan, Project: galaxy, Lines: 31, Source: join.py

Example 11: int

 cols = [ int( c ) for c in str( options.columns ).split( ',' ) if int( c ) > hinge ]
 inputs = [ options.input1, options.input2 ]
 if options.fill_options_file == 'None':
     inputs.extend( args )
 elif len( args ) > 0:
     inputs.extend( args )
 fill_options = None
 if options.fill_options_file != 'None' and options.fill_options_file is not None:
     try:
         if simplejson is None:
             raise simplejson_exception
         fill_options = Bunch( **stringify_dictionary_keys( simplejson.load( open( options.fill_options_file ) ) ) )
     except Exception, e:
         print 'Warning: Ignoring fill options due to simplejson error (%s).' % e
 if fill_options is None:
     fill_options = Bunch()
 if 'file1_columns' not in fill_options:
     fill_options.file1_columns = None
 if fill_options and fill_options.file1_columns:
     fill_empty = {}
     for col in cols:
         fill_empty[ col ] = fill_options.file1_columns[ col - 1 ]
 else:
     fill_empty = None
 assert len( cols ) > 0, 'You need to select at least one column in addition to the hinge'
 delimiter = '\t'
 # make sure all files are sorted in same way, ascending
 tmp_input_files = []
 input_files = inputs[:]
 for in_file in input_files:
     tmp_file = tempfile.NamedTemporaryFile()
Developer: agbiotec, Project: galaxy-tools-vcr, Lines: 31, Source: column_join.py

Example 12: get_uploaded_datasets

 def get_uploaded_datasets( self, trans, context, override_name = None, override_info = None ):
     def get_data_file_filename( data_file, override_name = None, override_info = None ):
         dataset_name = override_name
         dataset_info = override_info
         def get_file_name( file_name ):
             file_name = file_name.split( '\\' )[-1]
             file_name = file_name.split( '/' )[-1]
             return file_name
         try:
             # Use the existing file
             if not dataset_name and 'filename' in data_file:
                 dataset_name = get_file_name( data_file['filename'] )
             if not dataset_info:
                 dataset_info = 'uploaded file'
             return Bunch( type='file', path=data_file['local_filename'], name=get_file_name( data_file['filename'] ) )
             #return 'file', data_file['local_filename'], get_file_name( data_file.filename ), dataset_name, dataset_info
         except:
             # The uploaded file should've been persisted by the upload tool action
             return Bunch( type=None, path=None, name=None )
             #return None, None, None, None, None
     def get_url_paste_urls_or_filename( group_incoming, override_name = None, override_info = None ):
         filenames = []
         url_paste_file = group_incoming.get( 'url_paste', None )
         if url_paste_file is not None:
             url_paste = open( url_paste_file, 'r' ).read( 1024 )
             if url_paste.lstrip().lower().startswith( 'http://' ) or url_paste.lstrip().lower().startswith( 'ftp://' ):
                 url_paste = url_paste.replace( '\r', '' ).split( '\n' )
                 for line in url_paste:
                     line = line.strip()
                     if line:
                         if not line.lower().startswith( 'http://' ) and not line.lower().startswith( 'ftp://' ):
                             continue # non-url line, ignore
                         precreated_name = line
                         dataset_name = override_name
                         if not dataset_name:
                             dataset_name = line
                         dataset_info = override_info
                         if not dataset_info:
                             dataset_info = 'uploaded url'
                         yield Bunch( type='url', path=line, name=precreated_name )
                         #yield ( 'url', line, precreated_name, dataset_name, dataset_info )
             else:
                 dataset_name = dataset_info = precreated_name = 'Pasted Entry' #we need to differentiate between various url pastes here
                 if override_name:
                     dataset_name = override_name
                 if override_info:
                     dataset_info = override_info
                 yield Bunch( type='file', path=url_paste_file, name=precreated_name )
                 #yield ( 'file', url_paste_file, precreated_name, dataset_name, dataset_info )
     def get_one_filename( context ):
         data_file = context['file_data']
         url_paste = context['url_paste']
         name = context.get( 'NAME', None )
         info = context.get( 'INFO', None )
         warnings = []
         space_to_tab = False 
         if context.get( 'space_to_tab', None ) not in ["None", None]:
             space_to_tab = True
         file_bunch = get_data_file_filename( data_file, override_name = name, override_info = info )
         if file_bunch.path and url_paste:
             if url_paste.strip():
                 warnings.append( "All file contents specified in the paste box were ignored." )
         else: #we need to use url_paste
             for file_bunch in get_url_paste_urls_or_filename( context, override_name = name, override_info = info ):
                 if file_bunch.path:
                     break
         return file_bunch, warnings
     def get_filenames( context ):
         rval = []
         data_file = context['file_data']
         url_paste = context['url_paste']
         name = context.get( 'NAME', None )
         info = context.get( 'INFO', None )
         space_to_tab = False
         if context.get( 'space_to_tab', None ) not in ["None", None]:
             space_to_tab = True
         warnings = []
         file_bunch = get_data_file_filename( data_file, override_name = name, override_info = info )
         if file_bunch.path:
             file_bunch.space_to_tab = space_to_tab
             rval.append( file_bunch )
         for file_bunch in get_url_paste_urls_or_filename( context, override_name = name, override_info = info ):
             if file_bunch.path:
                 file_bunch.space_to_tab = space_to_tab
                 rval.append( file_bunch )
         return rval
     file_type = self.get_file_type( context )
     d_type = self.get_datatype( trans, context )
     dbkey = context.get( 'dbkey', None )
     writable_files = d_type.writable_files
     writable_files_offset = 0
     groups_incoming = [ None for filename in writable_files ]
     for group_incoming in context.get( self.name, [] ):
         i = int( group_incoming['__index__'] )
         groups_incoming[ i ] = group_incoming
     if d_type.composite_type is not None:
         #handle uploading of composite datatypes
         #Only one Dataset can be created
         dataset = Bunch()
         dataset.type = 'composite'
#......... part of the code omitted here .........
Developer: msGenDev, Project: Yeps-EURAC, Lines: 101, Source: grouping.py

Example 13: main

def main():
    parser = optparse.OptionParser()
    parser.add_option(
        '-b', '--buffer',
        dest='buffer',
        type='int', default=1000000,
        help='Number of lines to buffer at a time. Default: 1,000,000 lines. A buffer of 0 will attempt to use memory only.'
    )
    parser.add_option(
        '-d', '--index_depth',
        dest='index_depth',
        type='int', default=3,
        help='Depth to use on filebased offset indexing. Default: 3.'
    )
    parser.add_option(
        '-p', '--keep_partial',
        action='store_true',
        dest='keep_partial',
        default=False,
        help='Keep rows in first input which are missing identifiers.')
    parser.add_option(
        '-u', '--keep_unmatched',
        action='store_true',
        dest='keep_unmatched',
        default=False,
        help='Keep rows in first input which are not joined with the second input.')
    parser.add_option(
        '-f', '--fill_options_file',
        dest='fill_options_file',
        type='str', default=None,
        help='Fill empty columns with a values from a JSONified file.')
    parser.add_option(
        '-H', '--keep_headers',
        action='store_true',
        dest='keep_headers',
        default=False,
        help='Keep the headers')

    options, args = parser.parse_args()

    fill_options = None
    if options.fill_options_file is not None:
        try:
            fill_options = Bunch(**stringify_dictionary_keys(json.load(open(options.fill_options_file))))  # json.load( open( options.fill_options_file ) )
        except Exception as e:
            print("Warning: Ignoring fill options due to json error (%s)." % e)
    if fill_options is None:
        fill_options = Bunch()
    if 'fill_unjoined_only' not in fill_options:
        fill_options.fill_unjoined_only = True
    if 'file1_columns' not in fill_options:
        fill_options.file1_columns = None
    if 'file2_columns' not in fill_options:
        fill_options.file2_columns = None

    try:
        filename1 = args[0]
        filename2 = args[1]
        column1 = int(args[2]) - 1
        column2 = int(args[3]) - 1
        out_filename = args[4]
    except Exception:
        print("Error parsing command line.", file=sys.stderr)
        sys.exit()

    # Character for splitting fields and joining lines
    split = "\t"

    return join_files(filename1, column1, filename2, column2, out_filename, split, options.buffer, options.keep_unmatched, options.keep_partial, options.keep_headers, options.index_depth, fill_options=fill_options)
Developer: lappsgrid-incubator, Project: Galaxy, Lines: 69, Source: join.py

Example 14: __main__


#......... part of the code omitted here .........
    # Handle --strand
    set_options += '--strand=%s ' % options.strand
    # Handle --ambiguous
    if options.ambiguous not in [ "no" ]:
        set_options += '--ambiguous=%s ' % options.ambiguous
    # Handle --shortcuts_for_yasra
    if options.shortcuts_for_yasra not in [ 'none' ]:
        set_options += '--%s ' % ( options.shortcuts_for_yasra )
    # Specify input2 and add [fullnames] modifier if output format is diffs
    if options.format == 'diffs':
        input2 = '%s[fullnames]' % options.input2
    else:
        input2 = options.input2
    if options.format == 'tabular':
        # Change output format to general if it's tabular and add field names for tabular output
        format = 'general-'
        tabular_fields = ':score,name1,strand1,size1,start1,zstart1,end1,length1,text1,name2,strand2,size2,start2,zstart2,end2,start2+,zstart2+,end2+,length2,text2,diff,cigar,identity,coverage,gaprate,diagonal,shingle'
    elif options.format == 'sam':
        # We currently need to keep headers.
        format = 'sam'
        tabular_fields = ''
    else:
        format = options.format
        tabular_fields = ''
    # Set up our queues
    threads = int( options.threads )
    lastz_job_queue = LastzJobQueue( threads, slots=SLOTS )
    combine_data_queue = CombineDataQueue( options.output )
    if str( options.ref_source ) in [ 'history', 'self' ]:
        # Reference is a fasta dataset from the history or the dataset containing the target sequence itself,
        # so split job across the number of sequences in the dataset ( this could be a HUGE number ).
        try:
            # Ensure there is at least 1 sequence in the dataset ( this may not be necessary ).
            error_msg = "The reference dataset is missing metadata.  Click the pencil icon in the history item and 'auto-detect' the metadata attributes."
            ref_sequences = int( options.ref_sequences )
            if ref_sequences < 1:
                stop_queues( lastz_job_queue, combine_data_queue )
                stop_err( error_msg )
        except:
            stop_queues( lastz_job_queue, combine_data_queue )
            stop_err( error_msg )
        seqs = 0
        fasta_reader = FastaReader( open( options.input1 ) )
        while True:
            # Read the next sequence from the reference dataset
            seq = fasta_reader.next()
            if not seq:
                break
            seqs += 1
            # Create a temporary file to contain the current sequence as input to lastz
            tmp_in_fd, tmp_in_name = tempfile.mkstemp( suffix='.in' )
            tmp_in = os.fdopen( tmp_in_fd, 'wb' )
            # Write the current sequence to the temporary input file
            tmp_in.write( '>%s\n%s\n' % ( seq.name, seq.text ) )
            tmp_in.close()
            # Create a 2nd temporary file to contain the output from lastz execution on the current sequence
            tmp_out_fd, tmp_out_name = tempfile.mkstemp( suffix='.out' )
            os.close( tmp_out_fd )
            # Generate the command line for calling lastz on the current sequence
            command = 'lastz %s%s %s %s --format=%s%s > %s' % ( tmp_in_name, ref_name, input2, set_options, format, tabular_fields, tmp_out_name )
            # Create a job object
            job = Bunch()
            job.command = command
            job.output = tmp_out_name
            job.cleanup = [ tmp_in_name, tmp_out_name ]
            job.combine_data_queue = combine_data_queue
            # Add another job to the lastz_job_queue.  Execution will wait at this point if the queue is full.
            lastz_job_queue.put( job, block=True )
        # Make sure the value of sequences in the metadata is the same as the number of
        # sequences read from the dataset.  According to Bob, this may not be necessary.
        if ref_sequences != seqs:
            stop_queues( lastz_job_queue, combine_data_queue )
            stop_err( "The value of metadata.sequences (%d) differs from the number of sequences read from the reference (%d)." % ( ref_sequences, seqs ) )
    else:
        # Reference is a locally cached 2bit file, split job across number of chroms in 2bit file
        tbf = TwoBitFile( open( options.input1, 'r' ) )
        for chrom in tbf.keys():
            # Create a temporary file to contain the output from lastz execution on the current chrom
            tmp_out_fd, tmp_out_name = tempfile.mkstemp( suffix='.out' )
            os.close( tmp_out_fd )
            command = 'lastz %s/%s%s %s %s --format=%s%s >> %s' % \
                ( options.input1, chrom, ref_name, input2, set_options, format, tabular_fields, tmp_out_name )
            # Create a job object
            job = Bunch()
            job.command = command
            job.output = tmp_out_name
            job.cleanup = [ tmp_out_name ]
            job.combine_data_queue = combine_data_queue
            # Add another job to the lastz_job_queue.  Execution will wait at this point if the queue is full.
            lastz_job_queue.put( job, block=True )
    # Stop the lastz_job_queue.
    for t in lastz_job_queue.threads:
        lastz_job_queue.put( STOP_SIGNAL, True )
    # Although all jobs are submitted to the queue, we can't shut down the combine_data_queue
    # until we know that all jobs have been submitted to its queue.  We do this by checking
    # whether all of the threads in the lastz_job_queue have terminated.
    while threading.activeCount() > 2:
        time.sleep( 1 )
    # Now it's safe to stop the combine_data_queue.
    combine_data_queue.put( STOP_SIGNAL )
Developer: jmchilton, Project: devteamdevshedtest1, Lines: 101, Source: lastz_wrapper.py

Example 15: get_uploaded_datasets

    def get_uploaded_datasets(self, trans, context, override_name=None, override_info=None):
        def get_data_file_filename(data_file, override_name=None, override_info=None, purge=True):
            dataset_name = override_name

            def get_file_name(file_name):
                file_name = file_name.split('\\')[-1]
                file_name = file_name.split('/')[-1]
                return file_name
            try:
                # Use the existing file
                if not dataset_name and 'filename' in data_file:
                    dataset_name = get_file_name(data_file['filename'])
                return Bunch(type='file', path=data_file['local_filename'], name=dataset_name, purge_source=purge)
            except Exception:
                # The uploaded file should've been persisted by the upload tool action
                return Bunch(type=None, path=None, name=None)

        def get_url_paste_urls_or_filename(group_incoming, override_name=None, override_info=None):
            url_paste_file = group_incoming.get('url_paste', None)
            if url_paste_file is not None:
                url_paste = open(url_paste_file, 'r').read()

                def start_of_url(content):
                    start_of_url_paste = content.lstrip()[0:8].lower()
                    looks_like_url = False
                    for url_prefix in ["http://", "https://", "ftp://", "file://"]:
                        if start_of_url_paste.startswith(url_prefix):
                            looks_like_url = True
                            break

                    return looks_like_url

                if start_of_url(url_paste):
                    url_paste = url_paste.replace('\r', '').split('\n')
                    for line in url_paste:
                        line = line.strip()
                        if line:
                            if not start_of_url(line):
                                continue  # non-url line, ignore

                            if "file://" in line:
                                if not trans.user_is_admin:
                                    raise AdminRequiredException()
                                elif not trans.app.config.allow_path_paste:
                                    raise ConfigDoesNotAllowException()
                                upload_path = line[len("file://"):]
                                dataset_name = os.path.basename(upload_path)
                            else:
                                dataset_name = line

                            if override_name:
                                dataset_name = override_name
                            yield Bunch(type='url', path=line, name=dataset_name)
                else:
                    dataset_name = 'Pasted Entry'  # we need to differentiate between various url pastes here
                    if override_name:
                        dataset_name = override_name
                    yield Bunch(type='file', path=url_paste_file, name=dataset_name)

        def get_one_filename(context):
            data_file = context['file_data']
            url_paste = context['url_paste']
            ftp_files = context['ftp_files']
            name = context.get('NAME', None)
            info = context.get('INFO', None)
            uuid = context.get('uuid', None) or None  # Turn '' to None
            file_type = context.get('file_type', None)
            dbkey = self.get_dbkey(context)
            warnings = []
            to_posix_lines = False
            if context.get('to_posix_lines', None) not in ["None", None, False]:
                to_posix_lines = True
            auto_decompress = False
            if context.get('auto_decompress', None) not in ["None", None, False]:
                auto_decompress = True
            space_to_tab = False
            if context.get('space_to_tab', None) not in ["None", None, False]:
                space_to_tab = True
            file_bunch = get_data_file_filename(data_file, override_name=name, override_info=info)
            if file_bunch.path:
                if url_paste is not None and url_paste.strip():
                    warnings.append("All file contents specified in the paste box were ignored.")
                if ftp_files:
                    warnings.append("All FTP uploaded file selections were ignored.")
            elif url_paste is not None and url_paste.strip():  # we need to use url_paste
                for file_bunch in get_url_paste_urls_or_filename(context, override_name=name, override_info=info):
                    if file_bunch.path:
                        break
                if file_bunch.path and ftp_files is not None:
                    warnings.append("All FTP uploaded file selections were ignored.")
            elif ftp_files is not None and trans.user is not None:  # look for files uploaded via FTP
                user_ftp_dir = trans.user_ftp_dir
                assert not os.path.islink(user_ftp_dir), "User FTP directory cannot be a symbolic link"
                for (dirpath, dirnames, filenames) in os.walk(user_ftp_dir):
                    for filename in filenames:
                        for ftp_filename in ftp_files:
                            if ftp_filename == filename:
                                path = relpath(os.path.join(dirpath, filename), user_ftp_dir)
                                if not os.path.islink(os.path.join(dirpath, filename)):
                                    ftp_data_file = {'local_filename' : os.path.abspath(os.path.join(user_ftp_dir, path)),
#......... part of the code omitted here .........
Developer: msauria, Project: galaxy, Lines: 101, Source: grouping.py


Note: The galaxy.util.bunch.Bunch class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and the source code copyright remains with the original authors; distribution and use are subject to the corresponding project's license. Do not reproduce without permission.