本文整理汇总了Python中mantid.api.AnalysisDataService.retrieve方法的典型用法代码示例。如果您正苦于以下问题:Python AnalysisDataService.retrieve方法的具体用法?Python AnalysisDataService.retrieve怎么用?Python AnalysisDataService.retrieve使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mantid.api.AnalysisDataService的用法示例。
在下文中一共展示了AnalysisDataService.retrieve方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_that_can_find_can_reduction_if_it_exists
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def test_that_can_find_can_reduction_if_it_exists(self):
    """Check that get_reduced_can_workspace_from_ads finds previously tagged can reductions on the ADS."""
    # Arrange: build a state and publish tagged workspaces (plus untagged extras) to the ADS.
    test_director = TestDirector()
    state = test_director.construct()
    tagged_workspace_names = {None: "test_ws",
                              OutputParts.Count: "test_ws_count",
                              OutputParts.Norm: "test_ws_norm"}
    SANSFunctionsTest._prepare_workspaces(number_of_workspaces=4,
                                          tagged_workspace_names=tagged_workspace_names,
                                          state=state,
                                          reduction_mode=ISISReductionMode.LAB)
    # Act
    workspace, workspace_count, workspace_norm = get_reduced_can_workspace_from_ads(state, output_parts=True,
                                                                                    reduction_mode=ISISReductionMode.LAB)  # noqa
    # Assert: the tagged workspaces are exactly the ones that were found.
    # Use assertIsNotNone/assertEqual instead of assertTrue(...) for clearer failure messages.
    self.assertIsNotNone(workspace)
    self.assertEqual(workspace.name(), AnalysisDataService.retrieve("test_ws").name())
    self.assertIsNotNone(workspace_count)
    self.assertEqual(workspace_count.name(), AnalysisDataService.retrieve("test_ws_count").name())
    self.assertIsNotNone(workspace_norm)
    self.assertEqual(workspace_norm.name(), AnalysisDataService.retrieve("test_ws_norm").name())
    # Clean up
    SANSFunctionsTest._remove_workspaces()
示例2: test_that_can_load_isis_nexus_file_with_event_data_and_multi_period
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def test_that_can_load_isis_nexus_file_with_event_data_and_multi_period(self):
    """Load a multi-period event-mode ISIS nexus file with a calibration file and publish it to the cache."""
    # Arrange
    state = SANSLoadTest._get_simple_state(sample_scatter="LARMOR00013065.nxs",
                                           calibration="80tubeCalibration_18-04-2016_r9330-9335.nxs")
    # Act
    workspace_names = {"SampleScatterWorkspace": "sample_scatter",
                       "SampleScatterMonitorWorkspace": "sample_monitor_scatter"}
    load_alg = self._run_load(state, publish_to_cache=True, use_cached=True, move_workspace=False,
                              output_workspace_names=workspace_names)
    # Assert
    expected_workspace_counts = [4, 0, 0, 0, 0, 0]
    expected_number_on_ads = 1
    expected_types = [EventWorkspace, None, None, None, None, None]
    self._do_test_output(load_alg, expected_workspace_counts, expected_number_on_ads, expected_types)
    # Check that calibration is added
    self.assertTrue(SANSLoadTest._has_calibration_been_applied(load_alg))
    # Confirm the calibration workspace was published to the ADS:
    # retrieve raises a RuntimeError when the name is absent.
    found_on_ads = True
    try:
        AnalysisDataService.retrieve("80tubeCalibration_18-04-2016_r9330-9335")
    except RuntimeError:
        found_on_ads = False
    self.assertTrue(found_on_ads)
    # Cleanup
    remove_all_workspaces_from_ads()
示例3: testConvertUnits
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def testConvertUnits(self):
    """CorrectTOF + ConvertUnits + ConvertToDistribution should reproduce TOFTOFConvertTOFToDeltaE."""
    corrected_name = "outputws1"
    alg_test = run_algorithm("CorrectTOF", InputWorkspace=self._input_ws, EPPTable=self._table, OutputWorkspace=corrected_name)
    self.assertTrue(alg_test.isExecuted())
    ws_corrected = AnalysisDataService.retrieve(corrected_name)
    # convert to energy transfer and then to a distribution
    run_algorithm("ConvertUnits", InputWorkspace=ws_corrected, Target='DeltaE', EMode='Direct', EFixed=2.27, OutputWorkspace=corrected_name + '_dE')
    ws_dE = AnalysisDataService.retrieve(corrected_name + '_dE')
    run_algorithm("ConvertToDistribution", Workspace=ws_dE)
    # build the reference X axis from the input TOF axis
    tof1 = 2123.33867005
    dataX = self._input_ws.readX(0) - tof1
    tel = 8189.5 - tof1
    factor = m_n*1e+15/eV
    newX = 0.5*factor*16.0*(1/tel**2 - 1/dataX**2)
    # compare X axes
    self.assertTrue(np.allclose(newX, ws_dE.readX(0), atol=0.01))
    # build the reference Y data and compare to the output
    tof = dataX[:-1] + 5.25
    newY = self._input_ws.readY(0)*tof**3/(factor*10.5*16.0)
    self.assertTrue(np.allclose(newY, ws_dE.readY(0), rtol=0.01))
    # tidy up the workspaces created above
    run_algorithm("DeleteWorkspace", Workspace=ws_dE)
    run_algorithm("DeleteWorkspace", Workspace=ws_corrected)
示例4: edit_matrix_workspace
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def edit_matrix_workspace(sq_name, scale_factor, shift, edited_sq_name=None):
    """
    Edit the matrix workspace of S(Q) by scaling and then shifting: S'(Q) = S(Q) * scale_factor + shift
    :param sq_name: name of the SofQ workspace on the ADS
    :param scale_factor: multiplicative factor applied to the data
    :param shift: additive offset applied after scaling
    :param edited_sq_name: workspace name for the edited S(Q); if None, no
                           rename/clean-up of the arithmetic result is done
    :return:
    """
    # get the workspace
    if AnalysisDataService.doesExist(sq_name) is False:
        raise RuntimeError('S(Q) workspace {0} cannot be found in ADS.'.format(sq_name))
    if edited_sq_name is not None:
        # work on a copy so the original S(Q) is untouched
        simpleapi.CloneWorkspace(InputWorkspace=sq_name, OutputWorkspace=edited_sq_name)
        sq_ws = AnalysisDataService.retrieve(edited_sq_name)
    else:
        sq_ws = AnalysisDataService.retrieve(sq_name)
    # scale and shift; workspace arithmetic creates new (temporary) workspaces
    sq_ws = sq_ws * scale_factor
    sq_ws = sq_ws + shift
    # Bug fix: only delete/rename when a target name was actually requested.
    # The original compared sq_ws.name() against edited_sq_name unconditionally,
    # so with edited_sq_name=None it called DeleteWorkspace(Workspace=None).
    if edited_sq_name is not None and sq_ws.name() != edited_sq_name:
        simpleapi.DeleteWorkspace(Workspace=edited_sq_name)
        simpleapi.RenameWorkspace(InputWorkspace=sq_ws, OutputWorkspace=edited_sq_name)
    assert sq_ws is not None, 'S(Q) workspace cannot be None.'
    print('[DB...BAT] S(Q) workspace that is edit is {0}'.format(sq_ws))
示例5: test_DNSFRSelfCorrection
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def test_DNSFRSelfCorrection(self):
    """DNSFlippingRatioCorr applied to the NiCr/background data itself: the corrected spin-flip channel must vanish."""
    out_name = "DNSFlippingRatioCorrTest_Test4"
    # normalization = 1.0 is assumed, as set in self._create_fake_workspace
    sf_data = self.__sf_nicrws - self.__sf_bkgrws
    nsf_data = self.__nsf_nicrws - self.__nsf_bkgrws
    alg = run_algorithm("DNSFlippingRatioCorr", SFDataWorkspace=sf_data,
                        NSFDataWorkspace=nsf_data, SFNiCrWorkspace=self.__sf_nicrws.getName(),
                        NSFNiCrWorkspace=self.__nsf_nicrws.getName(), SFBkgrWorkspace=self.__sf_bkgrws.getName(),
                        NSFBkgrWorkspace=self.__nsf_bkgrws.getName(), SFOutputWorkspace=out_name + 'SF',
                        NSFOutputWorkspace=out_name + 'NSF')
    self.assertTrue(alg.isExecuted())
    # fetch the corrected workspaces
    sf_out = AnalysisDataService.retrieve(out_name + 'SF')
    nsf_out = AnalysisDataService.retrieve(out_name + 'NSF')
    # dimensions: 24 detectors, 2-dimensional workspaces
    for out_ws in (sf_out, nsf_out):
        self.assertEqual(24, out_ws.getNumberHistograms())
        self.assertEqual(2, out_ws.getNumDims())
    # the corrected spin-flip signal must be zero everywhere
    for det in range(24):
        self.assertAlmostEqual(0.0, sf_out.readY(det)[0])
    # reference for the non-spin-flip channel: nsf + sf.
    # NOTE(review): the original in-line comment claimed "nsf - sf^2/nsf",
    # but the code has always compared against nsf + sf -- confirm intent.
    nsf = np.array(nsf_data.extractY())
    sf = np.array(sf_data.extractY())
    refdata = nsf + sf
    for det in range(24):
        self.assertAlmostEqual(refdata[det][0], nsf_out.readY(det)[0])
    # clean up, same order as before: SF output, NSF output, then inputs
    for ws in (out_name + 'SF', out_name + 'NSF', sf_data, nsf_data):
        run_algorithm("DeleteWorkspace", Workspace=ws)
    return
示例6: _check_if_all_multi_period_workspaces_have_the_same_position
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def _check_if_all_multi_period_workspaces_have_the_same_position(self, base_name, number_of_workspaces):
    """Assert that every workspace base_name1..base_nameN shares the position and rotation of base_name1."""
    # the first period is the reference all the others are compared against
    reference = AnalysisDataService.retrieve(base_name + str(1))
    expected_position, expected_rotation = self._get_position_and_rotation(reference)
    for period in range(2, number_of_workspaces + 1):
        candidate = AnalysisDataService.retrieve(base_name + str(period))
        position, rotation = self._get_position_and_rotation(candidate)
        self.assertEqual(position, expected_position)
        self.assertEqual(rotation, expected_rotation)
示例7: calculate_peak_center
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def calculate_peak_center(self):
    """Calculate the peak center as the weighted average of the Q-sample
    positions of all peaks found in the peak workspace, weighting each peak
    by detector counts normalised by monitor counts.

    The result is stored in self._avgPeakCenter.
    :return:
    """
    # Go through the peak workspaces to calculate peak center with weight (monitor and counts)
    peak_ws = AnalysisDataService.retrieve(self._myPeakWorkspaceName)
    # spice table workspace
    spice_table_name = get_spice_table_name(self._myExpNumber, self._myScanNumber)
    spice_table_ws = AnalysisDataService.retrieve(spice_table_name)
    pt_spice_row_dict = build_pt_spice_table_row_map(spice_table_ws)
    det_col_index = spice_table_ws.getColumnNames().index('detector')
    monitor_col_index = spice_table_ws.getColumnNames().index('monitor')
    num_found_peaks = peak_ws.rowCount()
    q_sample_sum = numpy.array([0., 0., 0.])
    weight_sum = 0.
    # Fix: use range and print() (Python 2/3 compatible) in place of the
    # original Python-2-only xrange and print statement.
    for i_peak in range(num_found_peaks):
        # get peak
        peak_i = peak_ws.getPeak(i_peak)
        run_number = peak_i.getRunNumber()
        # get Pt. number (encoded into the run number modulo the scan number)
        pt_number = run_number % self._myScanNumber
        # get row number and then detector counts and monitor counts
        if pt_number not in pt_spice_row_dict:
            # skip peaks whose Pt. cannot be located in the SPICE table
            print('[Error] Scan %d Peak %d Pt %d cannot be located.' % (self._myScanNumber, i_peak, pt_number))
            continue
        row_index = pt_spice_row_dict[pt_number]
        det_counts = spice_table_ws.cell(row_index, det_col_index)
        monitor_counts = spice_table_ws.cell(row_index, monitor_col_index)
        if monitor_counts < 1.:
            # skip zero-count measurement
            continue
        # convert q sample from V3D to ndarray
        q_i = peak_i.getQSampleFrame()
        q_array = numpy.array([q_i.X(), q_i.Y(), q_i.Z()])
        # weight = detector counts normalised by monitor counts
        weight_i = float(det_counts) / float(monitor_counts)
        # contribute to total
        weight_sum += weight_i
        q_sample_sum += q_array * weight_i
        # set non-normalized peak intensity as detector counts (roughly)
        peak_i.setIntensity(det_counts)
    # END-FOR (i_peak)
    # NOTE(review): if every peak is skipped, weight_sum stays 0 and this
    # division yields inf/nan -- confirm callers guarantee at least one
    # usable peak before tightening this into an explicit error.
    self._avgPeakCenter = q_sample_sum / weight_sum
    return
示例8: test_genHKLList
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def test_genHKLList(self):
    """Test CreateLeBailFitInput with generated Bragg reflections (no reflections file).
    """
    # Set up and execute the algorithm
    alg_test = run_algorithm("CreateLeBailFitInput",
                             ReflectionsFile="",
                             MaxHKL="12,12,12",
                             FullprofParameterFile="2011B_HR60b2.irf",
                             Bank=2,
                             LatticeConstant=4.66,
                             GenerateBraggReflections=True,
                             InstrumentParameterWorkspace="PG3_Bank2_Foo2",
                             BraggPeakParameterWorkspace="Arb_Peaks")
    self.assertTrue(alg_test.isExecuted())
    # Verify the profile-parameter table: exactly one LatticeConstant row.
    # (Removed a dead 'numrowgood' computation that only fed a commented-out
    # row-count assertion.)
    paramws = AnalysisDataService.retrieve("PG3_Bank2_Foo2")
    paramnames = [paramws.cell(i, 0) for i in range(paramws.rowCount())]
    self.assertEqual(paramnames.count("LatticeConstant"), 1)
    # Bragg peak list: the generated reflections must be non-trivial
    braggws = AnalysisDataService.retrieve("Arb_Peaks")
    self.assertEqual(braggws.rowCount() > 20, True)
    # Clean up the workspaces created by the algorithm
    AnalysisDataService.remove("PG3_Bank2_Foo2")
    AnalysisDataService.remove("Arb_Peaks")
    return
示例9: test_DNSMomentumTransfer
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def test_DNSMomentumTransfer(self):
    """Merge runs onto a |Q| axis and verify the axis values and intensities."""
    merged_name = "DNSMergeRunsTest_Test4"
    alg = run_algorithm("DNSMergeRuns", WorkspaceNames=self.workspaces,
                        OutputWorkspace=merged_name, HorizontalAxis='|Q|')
    self.assertTrue(alg.isExecuted())
    # fetch the merged result
    ws = AnalysisDataService.retrieve(merged_name)
    # dimensions: a single histogram of 96 |Q| bins
    self.assertEqual(96, ws.blocksize())
    self.assertEqual(2, ws.getNumDims())
    self.assertEqual(1, ws.getNumberHistograms())
    # reference |Q| values computed from the scattering angles
    ttheta = np.round(np.radians(self.angles), 4)
    qarr = np.sort(4.0*np.pi*np.sin(0.5*ttheta)/4.2)
    # compare the merged X values against the reference
    dataX = ws.extractX()[0]
    for expected, actual in zip(qarr, dataX):
        self.assertAlmostEqual(expected, actual)
    # the intensities must not be changed by the merge
    for value in ws.extractY()[0]:
        self.assertAlmostEqual(1.0, value)
    run_algorithm("DeleteWorkspace", Workspace=merged_name)
    return
示例10: test_LoadWavelength
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def test_LoadWavelength(self):
    """LoadDNSLegacy with an explicit Wavelength must use it for the X axis and the sample logs."""
    out_name = "LoadDNSLegacyTest_Test8"
    data_file = "dn134011vana.d_dat"
    alg = run_algorithm("LoadDNSLegacy", Filename=data_file, Normalization='no',
                        OutputWorkspace=out_name, CoilCurrentsTable=self.curtable,
                        Wavelength=5.7)
    self.assertTrue(alg.isExecuted())
    # Verify some values
    ws = AnalysisDataService.retrieve(out_name)
    # dimensions
    self.assertEqual(24, ws.getNumberHistograms())
    self.assertEqual(2, ws.getNumDims())
    # spot-check the counts and the wavelength-derived X values
    self.assertEqual(31461, ws.readY(1))
    self.assertEqual(13340, ws.readY(23))
    self.assertAlmostEqual(5.7, ws.readX(1)[0], 3)
    self.assertAlmostEqual(5.7, ws.readX(23)[0], 3)
    # sample logs must reflect the requested wavelength and the derived Ei
    run = ws.getRun()
    self.assertEqual(5.7, run.getProperty('wavelength').value)
    self.assertAlmostEqual(2.51782, run.getProperty('Ei').value, 3)
    run_algorithm("DeleteWorkspace", Workspace=out_name)
    return
示例11: get_average_omega
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def get_average_omega(exp_number, scan_number):
    """Sum the per-row (omega - 2theta/2) values of a scan's SPICE table.

    NOTE(review): despite the name, this returns numpy.sum of the corrected
    omega values, not their mean -- confirm whether numpy.mean was intended
    before changing the behavior.

    :param exp_number: experiment number used to locate the SPICE table
    :param scan_number: scan number used to locate the SPICE table
    :return: summed (omega - 2theta/2) over all Pt. rows
    """
    # get table workspace
    spice_table_name = util4.get_spice_table_name(exp_number, scan_number)
    spice_table = AnalysisDataService.retrieve(spice_table_name)
    # column indexes (hoisted: getColumnNames was called twice before)
    col_names = spice_table.getColumnNames()
    col_omega_index = col_names.index('omega')
    col_2theta_index = col_names.index('2theta')
    # build the vectors with comprehensions instead of filling pre-allocated
    # ndarrays by index
    num_rows = spice_table.rowCount()
    vec_omega = numpy.array([spice_table.cell(i, col_omega_index) for i in range(num_rows)], dtype='float')
    vec_2theta = numpy.array([spice_table.cell(i, col_2theta_index) for i in range(num_rows)], dtype='float')
    # omega-theta correction
    vec_omega -= vec_2theta * 0.5
    return numpy.sum(vec_omega)
示例12: test_TwoTheta
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def test_TwoTheta(self):
    """The corrected workspace must keep the 2theta angles of the data workspace, not those of vanadium/background."""
    out_name = "DNSDetCorrVanaTest_Test5"
    # rotate the detector bank of each input workspace to a different angle
    for in_ws, bank_angle in ((self.__dataws, -7.53),
                              (self.__vanaws, -8.02),
                              (self.__bkgrws, -8.54)):
        api.LoadInstrument(in_ws, InstrumentName='DNS')
        api.RotateInstrumentComponent(in_ws, "bank0", X=0, Y=1, Z=0, Angle=bank_angle)
    # run correction
    alg = run_algorithm("DNSDetEffCorrVana", InputWorkspace=self.__dataws.getName(),
                        OutputWorkspace=out_name, VanaWorkspace=self.__vanaws.getName(),
                        BkgWorkspace=self.__bkgrws.getName())
    self.assertTrue(alg.isExecuted())
    # check dimensions and angles
    ws = AnalysisDataService.retrieve(out_name)
    self.assertEqual(24, ws.getNumberHistograms())
    self.assertEqual(2, ws.getNumDims())
    # every detector's signed 2theta must match the data-workspace angles
    expected_tthetas = np.array([7.53 + i*5 for i in range(24)])
    for i in range(24):
        det = ws.getDetector(i)
        self.assertAlmostEqual(expected_tthetas[i], np.degrees(ws.detectorSignedTwoTheta(det)))
    run_algorithm("DeleteWorkspace", Workspace=out_name)
    return
示例13: decode
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def decode(self, obj_dic, project_path=None):
    """
    Decode an InstrumentView dictionary from a project save and return the created view.
    :param obj_dic: Dict; information describing an InstrumentView
    :param project_path: String; project save location (masks are only loaded
                         when a path is supplied)
    :return: InstrumentView's View with its state restored, or None when
             obj_dic is None
    """
    if obj_dic is None:
        return None
    # masks can only be loaded when a project path is available
    load_mask = project_path is not None
    if project_path is None:
        project_path = ""
    # Make the widget
    ws = ADS.retrieve(obj_dic["workspaceName"])
    instrument_view = InstrumentViewPresenter(ws).container
    instrument_widget = instrument_view.widget
    # 'decode' the saved values into the widget
    self.widget_decoder.decode(obj_dic, instrument_widget, project_path, load_mask)
    # Show the end result
    return instrument_view
示例14: _hasWorkspaceID
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def _hasWorkspaceID(workspace_name, workspace_id):
    """Return True when the named workspace (or, for a group, its first member) has the given workspace id."""
    workspace = AnalysisDataService.retrieve(workspace_name)
    # for a WorkspaceGroup the id of the first member is representative
    target = workspace[0] if isinstance(workspace, WorkspaceGroup) else workspace
    return target.id() == workspace_id
示例15: __init__
# 需要导入模块: from mantid.api import AnalysisDataService [as 别名]
# 或者: from mantid.api.AnalysisDataService import retrieve [as 别名]
def __init__(self, data_file, workspace_name=None):
    """Load a HFIR data file and extract the metadata used by the reduction UI.

    Any loading error is recorded in self.errors rather than raised.
    :param data_file: path of the data file to load
    :param workspace_name: optional output workspace name; a hidden default
                           ("__raw_data_file") is used when omitted
    """
    self.errors = []
    if HAS_MANTID:
        try:
            if workspace_name is None:
                self.data_ws = "__raw_data_file"
            else:
                self.data_ws = str(workspace_name)
            api.HFIRLoad(Filename=str(data_file), OutputWorkspace=self.data_ws)
            ws = AnalysisDataService.retrieve(self.data_ws)
            # wavelength band is taken from the first two X boundaries
            x = ws.dataX(0)
            self.wavelength = (x[0] + x[1]) / 2.0
            self.wavelength_spread = x[1] - x[0]
            # hoist the repeated ws.getRun() lookups
            run = ws.getRun()
            self.sample_detector_distance = run.getProperty("sample-detector-distance").value
            self.sample_detector_distance_offset = run.getProperty("sample-detector-distance-offset").value
            self.sample_si_window_distance = run.getProperty("sample-si-window-distance").value
            self.sample_detector_distance_moved = run.getProperty("sample_detector_distance").value
            self.sample_thickness = run.getProperty("sample-thickness").value
            self.beam_diameter = run.getProperty("beam-diameter").value
            logger.notice("Loaded data file: %s" % data_file)
        except Exception as exc:
            # Fix: was a bare 'except:' (also swallowing KeyboardInterrupt/
            # SystemExit) reporting the Python-2-only sys.exc_value; narrowed
            # to Exception and made version-independent. Message content is
            # unchanged since sys.exc_value was the exception instance itself.
            logger.error("Error loading data file:\n%s" % exc)
            self.errors.append("Error loading data file:\n%s" % exc)