本文整理汇总了C#中StudyStorageLocation.LoadStudyXml方法的典型用法代码示例。如果您正苦于以下问题：C# StudyStorageLocation.LoadStudyXml方法的具体用法？C# StudyStorageLocation.LoadStudyXml怎么用？C# StudyStorageLocation.LoadStudyXml使用的例子？那么，恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 StudyStorageLocation 的用法示例。
在下文中一共展示了StudyStorageLocation.LoadStudyXml方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: ValidateStudyState
/// <summary>
/// Validates the state of the study.
/// </summary>
/// <param name="context">Name of the application</param>
/// <param name="studyStorage">The study to validate</param>
/// <param name="modes">Specifying what validation to execute</param>
/// <exception cref="StudyIntegrityValidationFailure">
/// Thrown when the instance count in the study xml disagrees with the database record.
/// </exception>
public void ValidateStudyState(String context, StudyStorageLocation studyStorage, StudyIntegrityValidationModes modes)
{
    Platform.CheckForNullReference(studyStorage, "studyStorage");

    // Nothing requested: bail out before touching the persistence layer.
    if (modes == StudyIntegrityValidationModes.None)
        return;

    using (ServerExecutionContext scope = new ServerExecutionContext())
    {
        Study study = studyStorage.LoadStudy(scope.PersistenceContext);
        if (study == null)
            return;

        StudyXml studyXml = studyStorage.LoadStudyXml();

        // Instance-count validation runs in Default mode or when explicitly requested via the flag.
        bool checkInstanceCount =
            modes == StudyIntegrityValidationModes.Default ||
            (modes & StudyIntegrityValidationModes.InstanceCount) == StudyIntegrityValidationModes.InstanceCount;

        if (!checkInstanceCount)
            return;

        // A missing xml is tolerated here; only an actual count mismatch is a failure.
        if (studyXml == null || studyXml.NumberOfStudyRelatedInstances == study.NumberOfStudyRelatedInstances)
            return;

        ValidationStudyInfo validationStudyInfo = new ValidationStudyInfo(study, studyStorage.ServerPartition);
        throw new StudyIntegrityValidationFailure(
            ValidationErrors.InconsistentObjectCount, validationStudyInfo,
            String.Format("Number of instances in database and xml do not match: {0} vs {1}.",
                          study.NumberOfStudyRelatedInstances,
                          studyXml.NumberOfStudyRelatedInstances
                ));
    }
}
示例2: UpdateStudySizeInDBCommand
/// <summary>
/// Creates a command that records the study size (in KB) in the database
/// for the study at the given storage location.
/// </summary>
/// <param name="location">The storage location of the study whose size is recorded.</param>
public UpdateStudySizeInDBCommand(StudyStorageLocation location)
    : base("Update Study Size In DB", true)
{
    Platform.CheckForNullReference(location, "location");
    _location = location;

    // Loading the study xml may take a few ms so it's better to do it here instead of in OnExecute().
    // LoadStudyXml() can return null when the study xml file is missing (other callers in this
    // codebase null-check the same call); guard so the ctor doesn't throw NullReferenceException
    // and record a size of 0 in that case.
    StudyXml studyXml = _location.LoadStudyXml();
    _studySizeInKB = studyXml != null ? studyXml.GetStudySize() / KB : 0;
}
示例3: Validate
/// <summary>
/// Validates the specified <see cref="StudyStorageLocation"/>.
/// </summary>
/// <param name="storageLocation">The study to be validated</param>
/// <param name="validationLevels">Flags selecting which validation levels to run</param>
public void Validate(StudyStorageLocation storageLocation, ValidationLevels validationLevels)
{
    StudyXml studyXml = storageLocation.LoadStudyXml();
    Study study = storageLocation.Study;
    ServerPartition partition = storageLocation.ServerPartition;

    // Each level is an independent flag; both may run for the same call.
    bool runStudyLevel = (validationLevels & ValidationLevels.Study) == ValidationLevels.Study;
    bool runSeriesLevel = (validationLevels & ValidationLevels.Series) == ValidationLevels.Series;

    if (runStudyLevel)
        DoStudyLevelValidation(storageLocation, studyXml, study, partition);

    if (runSeriesLevel)
        DoSeriesLevelValidation(storageLocation, studyXml, study);
}
示例4: DoMigrateStudy
/// <summary>
/// Migrates the study to new tier
/// </summary>
/// <param name="storage"></param>
/// <param name="newFilesystem"></param>
private void DoMigrateStudy(StudyStorageLocation storage, ServerFilesystemInfo newFilesystem)
{
Platform.CheckForNullReference(storage, "storage");
Platform.CheckForNullReference(newFilesystem, "newFilesystem");
TierMigrationStatistics stat = new TierMigrationStatistics {StudyInstanceUid = storage.StudyInstanceUid};
stat.ProcessSpeed.Start();
StudyXml studyXml = storage.LoadStudyXml();
stat.StudySize = (ulong) studyXml.GetStudySize();
Platform.Log(LogLevel.Info, "About to migrate study {0} from {1} to {2}",
storage.StudyInstanceUid, storage.FilesystemTierEnum, newFilesystem.Filesystem.Description);
string newPath = Path.Combine(newFilesystem.Filesystem.FilesystemPath, storage.PartitionFolder);
DateTime startTime = Platform.Time;
DateTime lastLog = Platform.Time;
int fileCounter = 0;
ulong bytesCopied = 0;
long instanceCountInXml = studyXml.NumberOfStudyRelatedInstances;
using (ServerCommandProcessor processor = new ServerCommandProcessor("Migrate Study"))
{
TierMigrationContext context = new TierMigrationContext
{
OriginalStudyLocation = storage,
Destination = newFilesystem
};
string origFolder = context.OriginalStudyLocation.GetStudyPath();
processor.AddCommand(new CreateDirectoryCommand(newPath));
newPath = Path.Combine(newPath, context.OriginalStudyLocation.StudyFolder);
processor.AddCommand(new CreateDirectoryCommand(newPath));
newPath = Path.Combine(newPath, context.OriginalStudyLocation.StudyInstanceUid);
// don't create this directory so that it won't be backed up by MoveDirectoryCommand
CopyDirectoryCommand copyDirCommand = new CopyDirectoryCommand(origFolder, newPath,
delegate (string path)
{
// Update the progress. This is useful if the migration takes long time to complete.
FileInfo file = new FileInfo(path);
bytesCopied += (ulong)file.Length;
fileCounter++;
if (file.Extension != null && file.Extension.Equals(ServerPlatform.DicomFileExtension, StringComparison.InvariantCultureIgnoreCase))
{
TimeSpan elapsed = Platform.Time - lastLog;
TimeSpan totalElapsed = Platform.Time - startTime;
double speedInMBPerSecond = 0;
if (totalElapsed.TotalSeconds > 0)
{
speedInMBPerSecond = (bytesCopied / 1024f / 1024f) / totalElapsed.TotalSeconds;
}
if (elapsed > TimeSpan.FromSeconds(WorkQueueSettings.Instance.TierMigrationProgressUpdateInSeconds))
{
#region Log Progress
StringBuilder stats = new StringBuilder();
if (instanceCountInXml != 0)
{
float pct = (float)fileCounter / instanceCountInXml;
stats.AppendFormat("{0} files moved [{1:0.0}MB] since {2} ({3:0}% completed). Speed={4:0.00}MB/s",
fileCounter, bytesCopied / 1024f / 1024f, startTime, pct * 100, speedInMBPerSecond);
}
else
{
stats.AppendFormat("{0} files moved [{1:0.0}MB] since {2}. Speed={3:0.00}MB/s",
fileCounter, bytesCopied / 1024f / 1024f, startTime, speedInMBPerSecond);
}
Platform.Log(LogLevel.Info, "Tier migration for study {0}: {1}", storage.StudyInstanceUid, stats.ToString());
try
{
using (IUpdateContext ctx = PersistentStoreRegistry.GetDefaultStore().OpenUpdateContext(UpdateContextSyncMode.Flush))
{
IWorkQueueEntityBroker broker = ctx.GetBroker<IWorkQueueEntityBroker>();
WorkQueueUpdateColumns parameters = new WorkQueueUpdateColumns
{FailureDescription = stats.ToString()};
broker.Update(WorkQueueItem.GetKey(), parameters);
ctx.Commit();
}
}
catch
{
// can't log the progress so far... just ignore it
}
finally
{
lastLog = DateTime.Now;
}
#endregion
}
//.........这里部分代码省略.........
示例5: ProcessStudy
/// <summary>
/// Reprocess a specific study.
/// </summary>
/// <param name="partition">The ServerPartition the study is on.</param>
/// <param name="location">The storage location of the study to process.</param>
/// <param name="engine">The rules engine to use when processing the study.</param>
/// <param name="postArchivalEngine">The rules engine used for studies that have been archived.</param>
/// <param name="dataAccessEngine">The rules engine strictly used for setting data acess.</param>
protected static void ProcessStudy(ServerPartition partition, StudyStorageLocation location, ServerRulesEngine engine, ServerRulesEngine postArchivalEngine, ServerRulesEngine dataAccessEngine)
{
if (!location.QueueStudyStateEnum.Equals(QueueStudyStateEnum.Idle) || !location.AcquireWriteLock())
{
Platform.Log(LogLevel.Error, "Unable to lock study {0}. The study is being processed. (Queue State: {1})", location.StudyInstanceUid,location.QueueStudyStateEnum.Description);
}
else
{
try
{
DicomFile msg = LoadInstance(location);
if (msg == null)
{
Platform.Log(LogLevel.Error, "Unable to load file for study {0}", location.StudyInstanceUid);
return;
}
bool archiveQueueExists;
bool archiveStudyStorageExists;
bool filesystemDeleteExists;
using (IReadContext read = PersistentStoreRegistry.GetDefaultStore().OpenReadContext())
{
// Check for existing archive queue entries
var archiveQueueBroker = read.GetBroker<IArchiveQueueEntityBroker>();
var archiveQueueCriteria = new ArchiveQueueSelectCriteria();
archiveQueueCriteria.StudyStorageKey.EqualTo(location.Key);
archiveQueueExists = archiveQueueBroker.Count(archiveQueueCriteria) > 0;
var archiveStorageBroker = read.GetBroker<IArchiveStudyStorageEntityBroker>();
var archiveStudyStorageCriteria = new ArchiveStudyStorageSelectCriteria();
archiveStudyStorageCriteria.StudyStorageKey.EqualTo(location.Key);
archiveStudyStorageExists = archiveStorageBroker.Count(archiveStudyStorageCriteria) > 0;
var filesystemQueueBroker = read.GetBroker<IFilesystemQueueEntityBroker>();
var filesystemQueueCriteria = new FilesystemQueueSelectCriteria();
filesystemQueueCriteria.StudyStorageKey.EqualTo(location.Key);
filesystemQueueCriteria.FilesystemQueueTypeEnum.EqualTo(FilesystemQueueTypeEnum.DeleteStudy);
filesystemDeleteExists = filesystemQueueBroker.Count(filesystemQueueCriteria) > 0;
}
using (var commandProcessor = new ServerCommandProcessor("Study Rule Processor"))
{
var context = new ServerActionContext(msg, location.FilesystemKey, partition, location.Key, commandProcessor);
// Check if the Study has been archived
if (archiveStudyStorageExists && !archiveQueueExists && !filesystemDeleteExists)
{
// Add a command to delete the current filesystemQueue entries, so that they can
// be reinserted by the rules engine.
context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key, ServerRuleApplyTimeEnum.StudyArchived));
// How to deal with exiting FilesystemQueue entries is problematic here. If the study
// has been migrated off tier 1, we probably don't want to modify the tier migration
// entries. Compression entries may have been entered when the Study was initially
// processed, we don't want to delete them, because they might still be valid.
// We just re-run the rules engine at this point, and delete only the StudyPurge entries,
// since those we know at least would only be applied for archived studies.
var studyRulesEngine = new StudyRulesEngine(postArchivalEngine, location, location.ServerPartition, location.LoadStudyXml());
studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyArchived, commandProcessor);
// Post Archive doesn't allow data access rules. Force Data Access rules to be reapplied
// to these studies also.
dataAccessEngine.Execute(context);
}
else
{
// Add a command to delete the current filesystemQueue entries, so that they can
// be reinserted by the rules engine.
context.CommandProcessor.AddCommand(new DeleteFilesystemQueueCommand(location.Key,ServerRuleApplyTimeEnum.StudyProcessed));
// Execute the rules engine, insert commands to update the database into the command processor.
// Due to ticket #11673, we create a new rules engine instance for each study, since the Study QC rules
// don't work right now with a single rules engine.
//TODO CR (Jan 2014) - Check if we can go back to caching the rules engine to reduce database hits on the rules
var studyRulesEngine = new StudyRulesEngine(location, location.ServerPartition, location.LoadStudyXml());
studyRulesEngine.Apply(ServerRuleApplyTimeEnum.StudyProcessed, commandProcessor);
}
// Do the actual database updates.
if (false == context.CommandProcessor.Execute())
{
Platform.Log(LogLevel.Error, "Unexpected failure processing Study level rules for study {0}", location.StudyInstanceUid);
}
// Log the FilesystemQueue related entries
location.LogFilesystemQueue();
}
}
finally
{
location.ReleaseWriteLock();
//.........这里部分代码省略.........