This page collects typical usage examples of the Python class galaxy.tools.DefaultToolState. If you have been wondering what DefaultToolState does, how to use it, or what real code that uses it looks like, the curated class examples below may help.
A total of 12 code examples of the DefaultToolState class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
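Before the individual examples, here is a minimal sketch of the pattern most of them share: create a DefaultToolState, then either decode a persisted representation against a tool-like object or fill in the inputs dict directly. The names tool, app and persisted below are placeholders for the Galaxy tool and application objects and a previously serialized state; this sketch is illustrative and is not taken from Galaxy itself.

from galaxy.tools import DefaultToolState

def build_state(tool, app, persisted=None):
    # Create an empty state; `inputs` will hold the tool's parameter values.
    state = DefaultToolState()
    if persisted is not None:
        # Rebuild the inputs dict from a previously serialized representation.
        state.decode(persisted, tool, app)
    else:
        # Otherwise start from an empty inputs dict and fill it in directly.
        state.inputs = {}
    return state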
Example 1: recover_state
def recover_state(self, state, **kwds):
    """ Recover state `dict` from simple dictionary describing configuration
    state (potentially from persisted step state).

    Sub-classes should supply a `default_state` method which contains the
    initial state `dict` with key, value pairs for all available attributes.
    """
    self.state = DefaultToolState()
    inputs = self.get_inputs()
    if inputs:
        self.state.decode(state, Bunch(inputs=inputs), self.trans.app)
    else:
        self.state.inputs = safe_loads(state) or {}
Example 2: decode_runtime_state
def decode_runtime_state( self, trans, string ):
    fake_tool = Bunch( inputs=self.get_runtime_inputs() )
    state = DefaultToolState()
    state.decode( string, fake_tool, trans.app )
    return state
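For context, the string decoded above would typically have been produced earlier by encoding the same runtime state against an identical fake tool. The method below is a hedged sketch of that counterpart; the name encode_runtime_state and its exact signature are assumptions and do not appear in the examples on this page.

def encode_runtime_state(self, trans, state):
    # Wrap the runtime inputs in a Bunch (as in Example 2) so the serialized
    # form can later be consumed by decode_runtime_state with the same inputs.
    fake_tool = Bunch(inputs=self.get_runtime_inputs())
    return state.encode(fake_tool, trans.app)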
Example 3: get_runtime_state
def get_runtime_state( self ):
    state = DefaultToolState()
    state.inputs = dict( input=None )
    return state
Example 4: upload_async_create
def upload_async_create( self, trans, tool_id=None, **kwd ):
    """
    Precreate datasets for asynchronous uploading.
    """
    cntrller = kwd.get( 'cntrller', '' )
    roles = kwd.get( 'roles', False )
    if roles:
        # The user associated the DATASET_ACCESS permission on the uploaded datasets with one or more roles.
        # We need to ensure that the roles are legitimately derived from the roles associated with the LIBRARY_ACCESS
        # permission if the library is not public ( this should always be the case, since any illegitimate roles
        # were filtered out of the roles displayed on the upload form ).  In addition, we need to ensure that the user
        # did not associate roles that would make the dataset inaccessible to everyone.
        library_id = trans.app.security.decode_id( kwd.get( 'library_id', '' ) )
        vars = dict( DATASET_ACCESS_in=roles )
        permissions, in_roles, error, msg = trans.app.security_agent.derive_roles_from_access( trans, library_id, cntrller, library=True, **vars )
        if error:
            return [ 'error', msg ]

    def create_dataset( name ):
        ud = Bunch( name=name, file_type=None, dbkey=None )
        if nonfile_params.get( 'folder_id', False ):
            replace_id = nonfile_params.get( 'replace_id', None )
            if replace_id not in [ None, 'None' ]:
                replace_dataset = trans.sa_session.query( trans.app.model.LibraryDataset ).get( trans.security.decode_id( replace_id ) )
            else:
                replace_dataset = None
            # FIXME: instead of passing params here ( which have been processed by util.Params() ), the original kwd
            # should be passed so that complex objects that may have been included in the initial request remain.
            library_bunch = upload_common.handle_library_params( trans, nonfile_params, nonfile_params.folder_id, replace_dataset )
        else:
            library_bunch = None
        return upload_common.new_upload( trans, cntrller, ud, library_bunch=library_bunch, state=trans.app.model.HistoryDatasetAssociation.states.UPLOAD )

    tool = self.get_toolbox().get_tool( tool_id )
    if not tool:
        return False  # bad tool_id
    nonfile_params = galaxy.util.Params( kwd, sanitize=False )
    if kwd.get( 'tool_state', None ) not in ( None, 'None' ):
        encoded_state = galaxy.util.string_to_object( kwd["tool_state"] )
        tool_state = DefaultToolState()
        tool_state.decode( encoded_state, tool, trans.app )
    else:
        tool_state = tool.new_state( trans )
    tool.update_state( trans, tool.inputs, tool_state.inputs, kwd, update_only=True )
    datasets = []
    dataset_upload_inputs = []
    for input_name, input in tool.inputs.iteritems():
        if input.type == "upload_dataset":
            dataset_upload_inputs.append( input )
    assert dataset_upload_inputs, Exception( "No dataset upload groups were found." )
    for dataset_upload_input in dataset_upload_inputs:
        d_type = dataset_upload_input.get_datatype( trans, kwd )

        if d_type.composite_type is not None:
            datasets.append( create_dataset( dataset_upload_input.get_composite_dataset_name( kwd ) ) )
        else:
            params = Bunch( **tool_state.inputs[dataset_upload_input.name][0] )
            if params.file_data not in [ None, "" ]:
                name = params.file_data
                if name.count('/'):
                    name = name.rsplit('/', 1)[1]
                if name.count('\\'):
                    name = name.rsplit('\\', 1)[1]
                datasets.append( create_dataset( name ) )
            if params.url_paste not in [ None, "" ]:
                url_paste = params.url_paste.replace( '\r', '' ).split( '\n' )
                url = False
                for line in url_paste:
                    line = line.rstrip( '\r\n' ).strip()
                    if not line:
                        continue
                    elif line.lower().startswith( 'http://' ) or line.lower().startswith( 'ftp://' ) or line.lower().startswith( 'https://' ):
                        url = True
                        datasets.append( create_dataset( line ) )
                    else:
                        if url:
                            continue  # non-url when we've already processed some urls
                        else:
                            # pasted data
                            datasets.append( create_dataset( 'Pasted Entry' ) )
                            break
    return [ d.id for d in datasets ]
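The filename clean-up and URL-versus-pasted-text handling in the loop above reappears almost verbatim in Examples 11 and 12. As a reading aid, the same rules are restated below as a hypothetical standalone helper; the function name and signature are illustrative only and are not part of Galaxy.

def upload_names_from_params(file_data, url_paste):
    names = []
    if file_data not in (None, ""):
        # Keep only the base filename, whether the client used '/' or '\\' separators.
        names.append(file_data.rsplit('/', 1)[-1].rsplit('\\', 1)[-1])
    if url_paste not in (None, ""):
        saw_url = False
        for line in url_paste.replace('\r', '').split('\n'):
            line = line.strip()
            if not line:
                continue
            if line.lower().startswith(('http://', 'https://', 'ftp://')):
                # Each URL line becomes its own dataset name.
                saw_url = True
                names.append(line)
            elif not saw_url:
                # Pure pasted text (no URLs seen yet) becomes a single entry.
                names.append('Pasted Entry')
                break
    return names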
Example 5: get_runtime_state
def get_runtime_state(self):
    state = DefaultToolState()
    state.inputs = self.state.inputs
    return state
Example 6: __init__
def __init__(self, trans, content_id=None, **kwds):
    self.trans = trans
    self.content_id = content_id
    self.state = DefaultToolState()
Example 7: WorkflowModule
class WorkflowModule(object):

    def __init__(self, trans, content_id=None, **kwds):
        self.trans = trans
        self.content_id = content_id
        self.state = DefaultToolState()

    # ---- Creating modules from various representations ---------------------

    @classmethod
    def from_dict(Class, trans, d, **kwds):
        module = Class(trans, **kwds)
        module.recover_state(d.get("tool_state"))
        module.label = d.get("label")
        return module

    @classmethod
    def from_workflow_step(Class, trans, step, **kwds):
        module = Class(trans, **kwds)
        module.recover_state(step.tool_inputs)
        module.label = step.label
        return module

    # ---- Saving in various forms ------------------------------------------

    def save_to_step(self, step):
        step.type = self.type
        step.tool_inputs = self.get_state()

    # ---- General attributes -----------------------------------------------

    def get_type(self):
        return self.type

    def get_name(self):
        return self.name

    def get_version(self):
        return None

    def get_content_id(self):
        """ If this component has an identifier external to the step (such
        as a tool or another workflow) return the identifier for that content.
        """
        return None

    def get_tooltip(self, static_path=''):
        return None

    # ---- Configuration time -----------------------------------------------

    def get_state(self, nested=True):
        """ Return a serializable representation of the persistable state of
        the step.
        """
        inputs = self.get_inputs()
        if inputs:
            return self.state.encode(Bunch(inputs=inputs), self.trans.app, nested=nested)
        else:
            return self.state.inputs

    def recover_state(self, state, **kwds):
        """ Recover state `dict` from simple dictionary describing configuration
        state (potentially from persisted step state).

        Sub-classes should supply a `default_state` method which contains the
        initial state `dict` with key, value pairs for all available attributes.
        """
        self.state = DefaultToolState()
        inputs = self.get_inputs()
        if inputs:
            self.state.decode(state, Bunch(inputs=inputs), self.trans.app)
        else:
            self.state.inputs = safe_loads(state) or {}

    def get_errors(self):
        """ This returns a step related error message as string or None """
        return None

    def get_inputs(self):
        """ This returns inputs displayed in the workflow editor """
        return {}

    def get_data_inputs(self):
        """ Get configure time data input descriptions. """
        return []

    def get_data_outputs(self):
        return []

    def get_post_job_actions(self, incoming):
        return []

    def check_and_update_state(self):
        """
        If the state is not in sync with the current implementation of the
        module, try to update. Returns a list of messages to be displayed
        """
        pass
# ... (part of the code is omitted here) ...
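A short hedged usage sketch of this base class: a hypothetical subclass restored from a stored step dictionary via from_dict, which routes the persisted "tool_state" through the recover_state method shown above. The subclass name and the step dictionary contents are invented for illustration and are not taken from Galaxy.

class PauseModule(WorkflowModule):
    # Minimal hypothetical subclass; the base class supplies the state handling.
    type = "pause"
    name = "Pause for review"

# `trans` stands for a Galaxy transaction object. Because get_inputs() returns {}
# here, recover_state falls back to safe_loads on the serialized tool_state.
module = PauseModule.from_dict(trans, {"tool_state": "{}", "label": "pause-1"})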
Example 8: decode_runtime_state
def decode_runtime_state(self, runtime_state):
    """ Takes the serialized runtime state and decodes it when running the workflow. """
    state = DefaultToolState()
    state.decode(runtime_state, Bunch(inputs=self.get_runtime_inputs()), self.trans.app)
    return state
Example 9: __inputs_to_state
def __inputs_to_state( self, inputs ):
    tool_state = DefaultToolState()
    tool_state.inputs = inputs
    return tool_state
Example 10: __string_to_state
def __string_to_state( self, state_string ):
    encoded_state = string_to_object( state_string )
    state = DefaultToolState()
    state.decode( encoded_state, self.tool, self.app )
    return state
Example 11: upload_async_create
def upload_async_create( self, trans, tool_id=None, **kwd ):
    """
    Precreate datasets for asynchronous uploading.
    """
    def create_dataset( name, history ):
        data = trans.app.model.HistoryDatasetAssociation( create_dataset=True )
        data.name = name
        data.state = data.states.UPLOAD
        data.history = history
        data.flush()
        history.add_dataset( data )
        return data

    tool = self.get_toolbox().tools_by_id.get( tool_id, None )
    if not tool:
        return False  # bad tool_id
    # params = util.Params( kwd, sanitize=tool.options.sanitize, tool=tool )
    if "tool_state" in kwd:
        encoded_state = util.string_to_object( kwd["tool_state"] )
        tool_state = DefaultToolState()
        tool_state.decode( encoded_state, tool, trans.app )
    else:
        tool_state = tool.new_state( trans )
    errors = tool.update_state( trans, tool.inputs, tool_state.inputs, kwd, update_only=True )
    datasets = []
    dataset_upload_inputs = []
    for input_name, input in tool.inputs.iteritems():
        if input.type == "upload_dataset":
            dataset_upload_inputs.append( input )
    assert dataset_upload_inputs, Exception( "No dataset upload groups were found." )
    for dataset_upload_input in dataset_upload_inputs:
        d_type = dataset_upload_input.get_datatype( trans, kwd )

        if d_type.composite_type is not None:
            datasets.append( create_dataset( 'Uploaded Composite Dataset (%s)' % dataset_upload_input.get_datatype_ext( trans, kwd ), trans.history ) )
        else:
            params = Bunch( **tool_state.inputs[dataset_upload_input.name][0] )
            if params.file_data not in [ None, "" ]:
                name = params.file_data
                if name.count('/'):
                    name = name.rsplit('/', 1)[1]
                if name.count('\\'):
                    name = name.rsplit('\\', 1)[1]
                datasets.append( create_dataset( name, trans.history ) )
            if params.url_paste not in [ None, "" ]:
                url_paste = params.url_paste.replace( '\r', '' ).split( '\n' )
                url = False
                for line in url_paste:
                    line = line.rstrip( '\r\n' ).strip()
                    if not line:
                        continue
                    elif line.lower().startswith( 'http://' ) or line.lower().startswith( 'ftp://' ):
                        url = True
                        datasets.append( create_dataset( line, trans.history ) )
                    else:
                        if url:
                            continue  # non-url when we've already processed some urls
                        else:
                            # pasted data
                            datasets.append( create_dataset( 'Pasted Entry', trans.history ) )
                            break
    if datasets:
        trans.model.flush()
    return [ d.id for d in datasets ]
Example 12: upload_async_create
def upload_async_create( self, trans, tool_id=None, **kwd ):
    """
    Precreate datasets for asynchronous uploading.
    """
    permissions = trans.app.security_agent.history_get_default_permissions( trans.history )

    def create_dataset( name ):
        ud = Bunch( name=name, file_type=None, dbkey=None )
        if nonfile_params.get( 'folder_id', False ):
            replace_id = nonfile_params.get( 'replace_id', None )
            if replace_id not in [ None, 'None' ]:
                replace_dataset = trans.sa_session.query( l.LibraryDataset ).get( int( replace_id ) )
            else:
                replace_dataset = None
            library_bunch = upload_common.handle_library_params( trans, nonfile_params, nonfile_params.folder_id, replace_dataset )
        else:
            library_bunch = None
        return upload_common.new_upload( trans, ud, library_bunch=library_bunch, state=trans.app.model.HistoryDatasetAssociation.states.UPLOAD )

    tool = self.get_toolbox().tools_by_id.get( tool_id, None )
    if not tool:
        return False  # bad tool_id
    nonfile_params = util.Params( kwd, sanitize=False )
    if kwd.get( 'tool_state', None ) not in ( None, 'None' ):
        encoded_state = util.string_to_object( kwd["tool_state"] )
        tool_state = DefaultToolState()
        tool_state.decode( encoded_state, tool, trans.app )
    else:
        tool_state = tool.new_state( trans )
    errors = tool.update_state( trans, tool.inputs, tool_state.inputs, kwd, update_only=True )
    datasets = []
    dataset_upload_inputs = []
    for input_name, input in tool.inputs.iteritems():
        if input.type == "upload_dataset":
            dataset_upload_inputs.append( input )
    assert dataset_upload_inputs, Exception( "No dataset upload groups were found." )
    for dataset_upload_input in dataset_upload_inputs:
        d_type = dataset_upload_input.get_datatype( trans, kwd )

        if d_type.composite_type is not None:
            datasets.append( create_dataset( 'Uploaded Composite Dataset (%s)' % dataset_upload_input.get_datatype_ext( trans, kwd ) ) )
        else:
            params = Bunch( **tool_state.inputs[dataset_upload_input.name][0] )
            if params.file_data not in [ None, "" ]:
                name = params.file_data
                if name.count('/'):
                    name = name.rsplit('/', 1)[1]
                if name.count('\\'):
                    name = name.rsplit('\\', 1)[1]
                datasets.append( create_dataset( name ) )
            if params.url_paste not in [ None, "" ]:
                url_paste = params.url_paste.replace( '\r', '' ).split( '\n' )
                url = False
                for line in url_paste:
                    line = line.rstrip( '\r\n' ).strip()
                    if not line:
                        continue
                    elif line.lower().startswith( 'http://' ) or line.lower().startswith( 'ftp://' ):
                        url = True
                        datasets.append( create_dataset( line ) )
                    else:
                        if url:
                            continue  # non-url when we've already processed some urls
                        else:
                            # pasted data
                            datasets.append( create_dataset( 'Pasted Entry' ) )
                            break
    return [ d.id for d in datasets ]