This page collects typical usage examples of the Python method WMCore.Database.CMSCouch.Database.document. If you are wondering what Database.document does, how to call it, or want to see it used in real code, the curated examples below should help. You can also explore further usage of the class it belongs to, WMCore.Database.CMSCouch.Database.
The following shows 15 code examples of Database.document, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
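Before diving into the examples, here is a minimal sketch of the basic call; the database name, server URL, and document id below are placeholders rather than values taken from any example:
from WMCore.Database.CMSCouch import Database

db = Database('some_database', 'http://localhost:5984')   # placeholder name and URL
doc = db.document('some-document-id')                     # fetch one document by id
print doc.get('_rev')                                      # the result behaves like a dict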
Example 1: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def main():
    print "Looking for problematic inbox elements..."
    problemRequests = getProblematicRequests()
    print "Found %d bad elements:" % len(problemRequests)
    if not problemRequests:
        print "Nothing to fix, contact a developer if the problem persists..."
        return 0
    for request in problemRequests:
        print request["RequestName"]
    var = raw_input("Can we close these for new data in inbox elements: Y/N\n")
    if var == "Y":
        print "Updating them in global inbox, you need a WMAgent proxy for this."
        inboxDB = Database('workqueue_inbox', 'https://cmsweb.cern.ch/couchdb')
        for request in problemRequests:
            inboxDB.document(request._id)  # fetch the element (return value is not used)
            inboxDB.updateDocument(request._id, 'WorkQueue', 'in-place', fields={'OpenForNewData': False})
        print "Done with the updates, this should fix the problem."
        return 0
    else:
        var = raw_input("Then can we delete these inbox elements: Y/N\n")
        if var == "Y":
            print "Deleting them from the global inbox, you need a WMAgent proxy for this."
            inboxDB = Database('workqueue_inbox', 'https://cmsweb.cern.ch/couchdb')
            for request in problemRequests:
                inboxDB.delete_doc(request._id, request.rev)
            print "Done with the deletions, this should fix the problem."
            return 0
        else:
            print "Doing nothing as you commanded..."
            return 0
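A short note on the update branch above: the script fetches the element first with document() and then pushes the field change through updateDocument(), which routes the write via the 'in-place' update handler of the 'WorkQueue' couchapp instead of rewriting the whole document. A hedged sketch of that read-then-update flow, with a placeholder document id:
from WMCore.Database.CMSCouch import Database

inboxDB = Database('workqueue_inbox', 'https://cmsweb.cern.ch/couchdb')
docId = 'some-element-id'                        # placeholder id
element = inboxDB.document(docId)                # raises if the id does not exist
inboxDB.updateDocument(docId, 'WorkQueue', 'in-place',
                       fields={'OpenForNewData': False})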
Example 2: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def main():
    config = loadConfigurationFile(os.environ['WMAGENT_CONFIG'])
    config.CoreDatabase.dialect = 'oracle'
    init = WMInit()
    init.setDatabaseConnection(config.CoreDatabase.connectUrl,
                               config.CoreDatabase.dialect)
    couchDB = Database('wmagent_jobdump/fwjrs', '')
    couchDB2 = Database('wmagent_jobdump/jobs', '')
    myThread = threading.currentThread()
    daofactory = DAOFactory(package = "WMCore.WMBS",
                            logger = logging,
                            dbinterface = myThread.dbi)
    getJobsDAO = daofactory(classname = "Jobs.GetAllJobs")
    completedJobs = getJobsDAO.execute(state = 'complete')
    candidates = []
    while len(completedJobs):
        candidates = []
        chunk = completedJobs[:500]
        completedJobs = completedJobs[500:]
        result = couchDB.loadView('FWJRDump', 'outputByJobID', keys = chunk)
        rows = result['rows']
        for entry in rows:
            candidates.append(entry['key'])
        for jobId in candidates:
            doc = couchDB2.document(str(jobId))
            last = max(map(int, doc['states'].keys()))
            lastState = doc['states'][str(last)]['newstate']
            if lastState == 'success':
                print jobId
Example 3: requestDetails
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def requestDetails(requestName):
    """ Adds details from the Couch document as well as the database """
    WMCore.Lexicon.identifier(requestName)
    request = GetRequest.getRequestDetails(requestName)
    helper = loadWorkload(request)
    schema = helper.data.request.schema.dictionary_whole_tree_()
    # take the stuff from the DB preferentially
    schema.update(request)
    task = helper.getTopLevelTask()[0]
    schema['Site Whitelist'] = task.siteWhitelist()
    schema['Site Blacklist'] = task.siteBlacklist()
    schema['MergedLFNBase'] = str(helper.getMergedLFNBase())
    schema['UnmergedLFNBase'] = str(helper.getUnmergedLFNBase())
    schema['Campaign'] = str(helper.getCampaign())
    schema['AcquisitionEra'] = str(helper.getAcquisitionEra())
    if schema['SoftwareVersions'] == ['DEPRECATED']:
        schema['SoftwareVersions'] = helper.getCMSSWVersions()
    # Check in the CouchWorkloadDBName if not present
    schema.setdefault("CouchWorkloadDBName", "reqmgr_workload_cache")
    # get DbsUrl from CouchDB
    if schema.get("CouchWorkloadDBName", None) and schema.get("CouchURL", None):
        couchDb = Database(schema["CouchWorkloadDBName"], schema["CouchURL"])
        couchReq = couchDb.document(requestName)
        schema["DbsUrl"] = couchReq.get("DbsUrl", None)
    # https://github.com/dmwm/WMCore/issues/4588
    schema["SubscriptionInformation"] = helper.getSubscriptionInformation()
    return schema
Example 4: checkForMissingFiles
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def checkForMissingFiles(options):
    # Initialize stuff
    phedexAPI = PhEDEx({'cachepath' : options.cachepath})
    acdcCouch = Database('wmagent_acdc', options.acdcUrl)
    # Let's get the IDs of the ACDC documents for the task/request/group/user
    array = [options.group, options.user, options.request, options.task]
    result = acdcCouch.loadView('ACDC', 'owner_coll_fileset_docs', {'reduce' : False}, [array])
    documentsIDs = [x['id'] for x in result['rows']]
    badFiles = {}
    # Go through the documents
    for docID in documentsIDs:
        doc = acdcCouch.document(docID)
        # Are we going to change this doc? Better back it up
        if options.change:
            backupFile = open(os.path.join(options.backup, "%s.bkp" % doc["_id"]), 'w')
            json.dump(doc, backupFile)
            backupFile.close()
        # Go through the files
        files = doc["files"]
        for inputFile in files:
            # Use PhEDEx API to get site based on the SE
            se = files[inputFile]["locations"][0]
            siteLocation = phedexAPI.getBestNodeName(se)
            # Now get the PFN
            pfnDict = phedexAPI.getPFN(siteLocation, inputFile)
            inputPfn = pfnDict[(siteLocation, inputFile)]
            # Run lcg-ls commands and see what we get
            command = 'lcg-ls -b -D srmv2 --srm-timeout 60 %s' % inputPfn
            commandList = shlex.split(command)
            try:
                (stdout, stderr, exitCode) = runCommand(commandList, False, 70)
            except Exception, ex:
                exitCode = 99999
                stdout = ''
                stderr = str(ex)
            if exitCode:
                # Something went wrong with the command
                # Mark the file as bad
                if docID not in badFiles:
                    badFiles[docID] = []
                badFiles[docID].append(inputFile)
                print 'File %s is thought to be bad' % inputFile
                print 'Command was %s' % command
                print 'Return code was %i' % exitCode
                print 'Stdout was %s' % stdout
                print 'Stderr was %s' % stderr
Example 5: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def main():
    reader = WMStatsReader("http://dummy.cern.ch:5984", "wmagent_summary")
    wmstats = Database('wmagent_summary', 'http://dummy.cern.ch:5984')
    suspiciousWorkflows = reader.workflowsByStatus(["Processing Done"], stale = False)
    for entry in suspiciousWorkflows:
        requestDoc = wmstats.document(entry)
        statusList = requestDoc['request_status']
        if statusList[-2]['status'] == 'normal-archived':
            statusList = statusList[:-1]
            requestDoc['request_status'] = statusList
            wmstats.queue(requestDoc)
    wmstats.commit()
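The queue()/commit() pair used above is worth calling out: queue() only stages modified documents locally and a single commit() writes all of them back in one bulk request (example 15 below uses the same pairing). A minimal sketch of that read-modify-queue loop, with placeholder document ids and a placeholder field:
from WMCore.Database.CMSCouch import Database

db = Database('wmagent_summary', 'http://dummy.cern.ch:5984')
for docId in ['request-a', 'request-b']:        # placeholder ids
    doc = db.document(docId)                    # read
    doc['some_field'] = 'new value'             # placeholder modification
    db.queue(doc)                               # stage the change locally
db.commit()                                     # one bulk write for everything queued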
Example 6: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def main():
    if len(sys.argv) < 2:
        print ("Takes 1 input argument - dump of Oracle reqmgr_request "
               "table in a Python dictionary.")
        sys.exit(1)
    print "Creating database connection ..."
    # couch_server = CouchServer(couch_url)
    db = Database(couch_db_name, couch_url)
    execfile(sys.argv[1], globals())
    oracle_requests = reqmgr_request  # read from the input file
    print "Oracle requests: %s" % len(oracle_requests)
    print "Retrieving data from CouchDB ..."
    couch_requests = db.allDocs()
    couch_request_names = []
    for row in couch_requests["rows"]:
        if row["id"].startswith("_design"): continue
        couch_request_names.append(row["id"])
    print "CouchDB requests: %s" % len(couch_request_names)
    print "Comparing Oracle and CouchDB requests ..."
    not_present_in_couch = []
    for request in oracle_requests:
        oracle_request_name = request["REQUEST_NAME"]
        # remove first occurrence of value. Raises ValueError if not present
        try:
            couch_request_names.remove(oracle_request_name)
        except ValueError:
            not_present_in_couch.append(oracle_request_name)
    print "CouchDB requests not present in Oracle:"
    print "%s requests" % len(couch_request_names)
    for name in couch_request_names:
        request = db.document(name)
        if name != request["RequestName"] or name != request["_id"]:
            print ("\t Mismatch: CouchDB id: '%s' RequestName: '%s' name: '%s'" %
                   (request["_id"], request["RequestName"], name))
        print "%s %s %s" % (request["RequestName"], request["RequestType"],
                            request["RequestStatus"])
    print "\n\n"
    print "Oracle requests not present in CouchDB:"
    print "%s requests" % len(not_present_in_couch)
    for name in not_present_in_couch:
        print name
Example 7: findParentJobs
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def findParentJobs(jobId):
    # Connect to the Job and FWJR DBs
    jobDB = Database('wmagent_jobdump/jobs', 'http://dummy.cern.ch:5984')
    fwjrDB = Database('wmagent_jobdump/fwjrs', 'http://dummy.cern.ch:5984')
    # Get the document of the child job
    childJobDoc = jobDB.document(id = jobId)
    # Get the workflow and input files, transform them into suitable keys [workflow, lfn]
    workflow = childJobDoc['workflow']
    inputLfns = [x['lfn'] for x in childJobDoc['inputfiles']]
    keys = [[workflow, x] for x in inputLfns]
    # Get the jobs that produced the input files for this job
    # Load the id and fwjr for these jobs since we have to re-run them
    result = fwjrDB.loadView('FWJRDump', 'jobsByOutputLFN', {}, keys)
    for entry in result['rows']:
        key = entry['key']
        jobId = entry['value']
        fwjrId = entry['id']
        result = fwjrDB.loadView('FWJRDump', 'logArchivesByJobID', {}, [[int(x) for x in fwjrId.split('-')]])
        logArch = result['rows'][0]['value']['lfn']
        # Check whether the logArch is in some LogCollect
        logCollectTarball = ''
        result = jobDB.loadView('JobDump', 'jobsByInputLFN', {}, [[workflow, logArch]])
        if result['rows']:
            logCollectJobId = result['rows'][0]['id']
            result = fwjrDB.loadView('FWJRDump', 'outputByJobID', {}, [int(logCollectJobId)])
            if result['rows']:
                logCollectTarball = result['rows'][0]['value']['lfn']
            else:
                print "WARNING: The logArchive for job %s was in a LogCollect job but no tarball was produced" % jobId
        # Print out the information
        print "Job %s produced %s, the logArch for it is %s in %s" % (jobId, key[1], logArch, logCollectTarball)
    return
Example 8: update_software
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def update_software(config_file):
    """
    Retrieves CMSSW versions and ScramArchs from the CMS tag collector.
    """
    config = loadConfigurationFile(config_file)
    # source of the data
    tag_collector_url = config.views.data.tag_collector_url
    # store the data into CouchDB auxiliary database under "software" document
    couch_host = config.views.data.couch_host
    reqmgr_aux_db = config.views.data.couch_reqmgr_aux_db
    # get data from tag collector
    all_archs_and_versions = _get_all_scramarchs_and_versions(tag_collector_url)
    if not all_archs_and_versions:
        return
    # get data already stored in CouchDB
    couchdb = Database(dbname=reqmgr_aux_db, url=couch_host)
    try:
        sw_already_stored = couchdb.document("software")
        del sw_already_stored["_id"]
        del sw_already_stored["_rev"]
    except CouchNotFoundError:
        logging.error("Document id software, does not exist, creating it ...")
        doc = Document(id="software", inputDict=all_archs_and_versions)
        couchdb.commitOne(doc)
        return
    # now compare recent data from tag collector and what we already have stored
    # sorting is necessary
    if sorted(all_archs_and_versions) != sorted(sw_already_stored):
        logging.debug("ScramArch/CMSSW releases changed, updating software document ...")
        doc = Document(id="software", inputDict=all_archs_and_versions)
        couchdb.commitOne(doc)
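Example 8 above illustrates a "fetch or create" idiom: try document(), catch CouchNotFoundError when the document is missing, and create it with Document(id=...) plus commitOne(). A condensed, hedged sketch of the same idiom; it assumes Document and CouchNotFoundError can be imported from WMCore.Database.CMSCouch (the snippet does not show its imports), and the database name, URL, and payload are placeholders:
from WMCore.Database.CMSCouch import Database, Document, CouchNotFoundError

db = Database('reqmgr_auxiliary', 'http://localhost:5984')   # placeholders
try:
    stored = db.document('software')            # fetch the existing document
except CouchNotFoundError:
    # not there yet: create it with the desired payload (placeholder content)
    db.commitOne(Document(id='software', inputDict={'example_arch': ['CMSSW_X_Y_Z']}))
else:
    print stored['_rev']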
"""
Example 9: dump
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def dump(full_dump=False, fields=None):
    print("Querying fields: %s\n\n" % fields)
    db = Database(couch_db_name, couch_url)
    couch_requests = db.allDocs()
    doc_counter = 0
    for row in couch_requests["rows"]:
        if row["id"].startswith("_design"): continue
        doc = db.document(row["id"])
        if fields:
            s = ''
            for f in fields:
                try:
                    s += "%s:%s " % (f, doc[f])
                except KeyError:
                    s += "%s:n/a " % f
            print("%s %s\n" % (s, doc["RequestName"]))
        elif full_dump:
            print("%s\n%s\n%s\n" % (row["id"], doc, 70*'-'))
        else:
            print(row["id"])
        doc_counter += 1
        #if doc_counter > 100:
        #    break
    print("Total documents: %s" % doc_counter)
Example 10: DatabaseNotFoundException
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
class database:
    logger = logfactory

    class DatabaseNotFoundException(Exception):
        def __init__(self, db=''):
            self.db = str(db)
            database.logger.error('Database "%s" was not found.' % (self.db), level='critical')
        def __str__(self):
            return 'Error: Database "%s" was not found.' % self.db

    class DatabaseAccessError(Exception):
        def __init__(self, db=''):
            self.db = str(db)
            database.logger.error('Could not access database "%s".' % (self.db), level='critical')
        def __str__(self):
            return 'Error: Could not access database "%s".' % self.db

    class DocumentNotFoundException(Exception):
        def __init__(self, name=''):
            self.name = name
            database.logger.error('Document "%s" was not found.' % (self.name))
        def __str__(self):
            return 'Error: Document "%s" was not found.' % self.name

    class MapReduceSyntaxError(Exception):
        def __init__(self, query=''):
            self.query = query
            database.logger.error('Invalid query <%s>' % (self.query))
        def __str__(self):
            return 'Error: Invalid query "' + self.query + '"'

    class InvalidOperatorError(Exception):
        def __init__(self, op=''):
            self.op = str(op)
        def __str__(self):
            return 'Error: Operator "' + self.op + '" is invalid.'

    class InvalidParameterError(Exception):
        def __init__(self, param=''):
            self.param = str(param)
        def __str__(self):
            return 'Error: Invalid Parameter: ' + self.param

    cache_dictionary = defaultdict(lambda: None)

    def __init__(self, db_name='', url=None, cache=False):
        host = os.environ['HOSTNAME']
        if url is None:
            url = locator().dbLocation()
        #self.logger.log('I chose the url %s' % (url))
        if not db_name:
            raise self.DatabaseNotFoundException(db_name)
        self.db_name = db_name
        self.cache = cache
        if self.db_name in ['campaigns', 'chained_campaigns']:
            ## force cache for those.
            self.cache = True
        try:
            self.db = Database(db_name, url=url)
            # self.db = Database(db_name, url='http://preptest.cern.ch:5984/')
            # self.db = Database(db_name) # for using private DB @localhost:5984
        except ValueError as ex:
            raise self.DatabaseAccessError(db_name)
        self.allowed_operators = ['<=', '<', '>=', '>', '==', '~=']

    def __is_number(self, s):
        try:
            float(s)
            return True
        except ValueError:
            return False

    def get(self, prepid=''):
        if self.cache:
            result = self.__get_from_cache(prepid)
            if result: return result
        self.logger.log('Looking for document "%s" in "%s"...' % (prepid, self.db_name))
        try:
            doc = self.db.document(id=prepid)
            if self.cache:
                self.__save_to_cache(prepid, doc)
            return doc
        except Exception as ex:
            self.logger.error('Document "%s" was not found. Reason: %s' % (prepid, ex))
            return {}

    def __save_to_cache(self, key, value):
        from tools.locker import locker
        with locker.lock(key):
            self.cache_dictionary[key] = value

    def __get_from_cache(self, key):
        from tools.locker import locker
        with locker.lock(key):
            #......... the rest of the code is omitted here .........
Example 11: ContinuousSummaryHistogramTest
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
class ContinuousSummaryHistogramTest(unittest.TestCase):

    def setUp(self):
        """
        _setUp_

        Setup a couch database for testing
        of produced JSON
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setupCouch("histogram_dump_t")
        random.seed()
        self.histogramDB = Database(dbname = "histogram_dump_t")

    def tearDown(self):
        """
        _tearDown_

        Clean the couch
        """
        self.testInit.tearDownCouch()

    def buildRandomNumberList(self, n, distribution = "normalvariate", **kwargs):
        """
        _buildRandomNumberList_

        Builds a list with n pseudorandomly distributed
        numbers according to some given distribution
        """
        numberList = []
        if not kwargs:
            kwargs = {"mu" : 0, "sigma" : 1}
        for _ in range(n):
            generator = getattr(random, distribution)
            numberList.append(generator(**kwargs))
        return numberList

    def testA_BasicTest(self):
        """
        _testA_BasicTest_

        Build a histogram from a set of uniformly
        distributed pseudorandom numbers. Check
        that the statistic properties
        in the histogram are accurate to some degree,
        that the histogram binning is done right and
        that this can become a document and uploaded to couch
        """
        inputData = self.buildRandomNumberList(1000)
        histogram = ContinuousSummaryHistogram('TestHisto', 'MyLabel', 'SomeoneElsesLabel')
        # Populate the histogram
        for point in inputData:
            histogram.addPoint(point)
        # Get the JSON
        jsonHistogram = histogram.toJSON()
        # Check the histogram core data
        self.assertEqual(jsonHistogram["title"], "TestHisto")
        self.assertEqual(jsonHistogram["xLabel"], "MyLabel")
        self.assertAlmostEqual(jsonHistogram["average"], 0.0, places = 0)
        self.assertAlmostEqual(jsonHistogram["stdDev"], 1.0, places = 0)
        self.assertEqual(len(jsonHistogram["data"]), 16)
        self.assertTrue(jsonHistogram["continuous"])
        # Check the internal data
        self.assertEqual(jsonHistogram["internalData"]["yLabel"], "SomeoneElsesLabel")
        self.assertEqual(jsonHistogram["internalData"]["nPoints"], 1000)
        # Try to commit it to couch
        jsonHistogram["_id"] = jsonHistogram["title"]
        self.histogramDB.commitOne(jsonHistogram)
        storedJSON = self.histogramDB.document("TestHisto")
        self.assertEqual(len(storedJSON["data"]), 16)
        return

    def testB_extremeData(self):
        """
        _testB_extremeData_

        Put extreme points in the data and try to build a histogram.
        Check that it can process all this correctly
        """
        # First no data
        histogram = ContinuousSummaryHistogram('TestHisto', 'MyLabel', 'SomeoneElsesLabel')
        jsonHistogram = histogram.toJSON()
        self.assertEqual(jsonHistogram["title"], "TestHisto")
        self.assertEqual(jsonHistogram["xLabel"], "MyLabel")
        self.assertEqual(jsonHistogram["average"], 0.0)
        self.assertEqual(jsonHistogram["stdDev"], 0.0)
        self.assertEqual(len(jsonHistogram["data"]), 0)
        #......... the rest of the code is omitted here .........
Example 12: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def main():
    if len(sys.argv) < 2:
        print("Missing the connect Oracle TNS argument (user/[email protected]).")
        sys.exit(1)
    tns = sys.argv[1]
    print("Creating CouchDB database connection ...")
    couchdb = Database(couchdb_name, couch_url)
    print("Creating Oracle database connection ...")
    oradb = cx_Oracle.Connection(tns)
    num_couch_requests = get_couchdb_row_count(couchdb)
    print("Total CouchDB request documents in ReqMgr: %s" % num_couch_requests)
    num_oracle_requests = get_oracle_row_count(oradb, "reqmgr_request")
    print("Total Oracle requests entries in ReqMgr: %s" % num_oracle_requests)
    if num_couch_requests != num_oracle_requests:
        print("Number of requests in Oracle, CouchDB don't agree, fix that first.")
        sys.exit(1)
    else:
        print("Database cross-check (Oracle request names vs CouchDB): DONE, THE SAME.")

    def get_couch_value(couch_req, mapping):
        try:
            c = couch_req[mapping["couch"]]
            couch_missing = False
        except KeyError:
            # comparison will not happen due to missing flag, the value
            # will be stored in couch
            c = "N/A"
            couch_missing = True
        return str(c), couch_missing

    def check_oracle_worflow_value(oracle_value, mapping, req_name):
        # check Oracle WORKFLOW value
        if mapping["oracle"] == "WORKFLOW":
            # https://cmsweb.cern.ch/couchdb/reqmgr_workload_cache/linacre_2011A_442p2_DataReprocessingMuOnia_111119_005717/spec
            from_wf_url_req_name = oracle_value.rsplit('/', 2)[-2]
            if req_name != from_wf_url_req_name:
                print("Workflow URL mismatch: %s" % oracle_value)
                sys.exit(1)

    counter = 0
    for oracle_req in get_oracle_data(oradb):
        req_name = oracle_req["REQUEST_NAME"]
        # FILTER
        # check only requests injected approx. after last deployment (a lot of
        # stuff should have already been fixed in ReqMgr)
        # _13041._*$ (ending of request name with date/time)
        #if not re.match(".*_1304[0-3][0-9]_.*$", req_name):  # all April 2013
        #    continue
        counter += 1
        print("\n\n%s (%s)" % (req_name, counter))
        couch_req = couchdb.document(req_name)
        couch_fields_to_correct = {}
        for mapping in MAPPING:
            if mapping["couch"] in COUCH_TO_IGNORE:
                continue
            o = str(oracle_req[mapping["oracle"]])
            c, couch_missing = get_couch_value(couch_req, mapping)
            check_oracle_worflow_value(o, mapping, req_name)
            # compare oracle and couch values
            # don't update value in couch if it exists and is non-empty
            if (couch_missing or o != c) and c in ('None', '0', '', "N/A"):
                print("%s %s != %s" % (mapping, o, c))
                # correct couch request by oracle value
                couch_fields_to_correct[mapping["couch"]] = o
        if couch_fields_to_correct:
            print("Couch corrected fields:")
            print(couch_fields_to_correct)
            if sys.argv[-1] == "-c":
                couchdb.updateDocument(req_name, "ReqMgr", "updaterequest",
                                       fields=couch_fields_to_correct, useBody=True)
                print("Couch updated")
        else:
            print("OK")
    # fields that should be removed from couch
"""
Example 13: OpsClipboardTest
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
class OpsClipboardTest(unittest.TestCase):

    def setUp(self):
        # For experiments with CouchDB content it's useful when the docs
        # remain in the database, by commenting out the tearDownCouch statement.
        # If the database exists at this point, tearDownCouch was probably
        # commented out, so do not drop the database
        #self.testInit = TestInitCouchApp(__file__, dropExistingDb=False)
        self.testInit = TestInitCouchApp(__file__, dropExistingDb=True)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        dbName = "opsclipboard_t"
        self.testInit.setupCouch(dbName, "OpsClipboard")
        # the tests use httplib2 for accessing the OpsClipboard couchapp to
        # emulate web browser access rather than direct REST access
        # couch attribute is only used for back verification of written/modified data
        #couchServer = CouchServer(os.environ["COUCHURL"])
        #self.configDatabase = couchServer.connectDatabase(dbName)
        # used to verify written / modified data in CouchDB
        self.couch = Database(dbName, self.testInit.couchUrl)

    def tearDown(self):
        # comment out to see stuff remaining in the database
        self.testInit.tearDownCouch()  # self.testInit.couch gets None-ed here
        #pass

    def _inject(self, numRequests):
        # This creates 10 documents using the test data above representing
        # 10 requests belonging to two campaigns that have just been placed
        # into the "ops-hold" state in the ReqMgr.
        # Whenever a request enters the "ops-hold" state, the dict containing the
        # request params should be added to the OpsClipboard using the
        # inject API call (see Assign.py):
        requests, campaignIds, requestIds = getTestRequests(numRequests)
        OpsClipboard.inject(self.testInit.couchUrl, self.testInit.couchDbName, *requests)
        return requests, campaignIds, requestIds

    def _getViewResults(self, viewName, options = {}):
        """
        Query CouchDB viewName, return rows.
        """
        try:
            result = self.couch.loadView("OpsClipboard", viewName, options)
        except Exception as ex:
            msg = "Error loading OpsClipboard view: '%s', reason:%s\n" % (viewName, ex)
            self.fail(msg)
        return result[u"rows"]

    def testA_view_all(self):
        """
        Testing the 'all' view.
        """
        self._inject(10)  # creates 10 documents
        # Now read back data for the test requests and verify
        # that we have 10 requests in the OpsClipboard
        # all view returns all requests in the OpsClipboard
        allRequests = self._getViewResults("all")
        self.assertEqual(len(allRequests), 10)  # expected 10 requests
        for request in allRequests:
            self.failUnless(request[u"key"] == u"NewlyHeld")

    def testB_view_campaign(self):
        """
        Testing the 'campaign' view.
        Returns requests with campaign_id as keys.
        """
        _, campaignIds, requestIds = self._inject(7)  # creates x docs/requests
        campView = self._getViewResults("campaign")
        self.assertEqual(len(campView), 7)
        for c in campView:
            self.failUnless(c[u"key"] in campaignIds)
            self.failUnless(c[u"value"][u"request_id"] in requestIds)
            # check that result ('value') dictionary has all these keys
            map(self.failUnless, [c[u"value"].has_key(key) for key in ("doc_id", "state", "updated")])

    def testC_view_campaign_ids(self):
        """
        Testing the 'campaign_ids' view.
        Returns a list of campaign names (campaign_ids) with duplicates removed.
        """
        _, campaignIds, _ = self._inject(8)  # creates x docs/requests
        campList = self._getViewResults("campaign_ids", options = {"group": True})
        expected = [campList[0]["key"], campList[1]["key"]]
        self.assertEqual(expected, campaignIds)

    def testD_view_reject_update_changestate(self):
        """
        Testing the 'reject' view.
        Calls changeState function which also tests 'changestate'
        update (CouchDB) function.
        """
        #......... the rest of the code is omitted here .........
Example 14: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def main():
    myDB = Database('workqueue_inbox', 'https://vocms169.cern.ch/couchdb')
    document = myDB.document(sys.argv[1])
    inputs = document['WMCore.WorkQueue.DataStructs.WorkQueueElement.WorkQueueElement']['Inputs']
    for block in inputs:
        print ' "%s",' % block
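The document returned above is plain nested dictionaries and lists, so its fields are reached with ordinary indexing. A small hedged variant that tolerates a missing key, with a placeholder document id:
from WMCore.Database.CMSCouch import Database

myDB = Database('workqueue_inbox', 'https://vocms169.cern.ch/couchdb')
document = myDB.document('some-element-id')     # placeholder id
element = document.get('WMCore.WorkQueue.DataStructs.WorkQueueElement.WorkQueueElement', {})
for block in element.get('Inputs', {}):
    print ' "%s",' % block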
Example 15: main
# Required import: from WMCore.Database.CMSCouch import Database [as alias]
# Or: from WMCore.Database.CMSCouch.Database import document [as alias]
def main():
    parser = OptionParser()
    parser.add_option("-f", "--input-acdc", dest="acdcList")
    parser.add_option("-m", "--input-mapfile", dest="mapFile")
    parser.add_option("-u", "--url", dest="url")
    parser.add_option("-d", "--dry-run", dest="dryRun",
                      action="store_true", default=False)
    parser.add_option("-l", "--log-file", dest="logFile")
    (options, _) = parser.parse_args()
    handle = open(options.logFile, 'w')
    url = options.url
    database = 'wmagent_acdc'
    acdcDB = Database(database, url)
    handle.write('Opening ACDC database in %s/%s\n' % (url, database))
    inputACDC = readACDCInput(options.acdcList)
    usersMap = readUsersMap(options.mapFile)
    handle.write('Have %d workflows to fix\n' % len(inputACDC))
    handle.write('=================================================================\n')
    for workflow in inputACDC:
        collection_name = workflow['collection_name']
        fileset_name = workflow['fileset_name']
        original_dn = workflow['original_dn']
        handle.write('Original workflow: %s\n' % collection_name)
        handle.write('Original task: %s\n' % fileset_name)
        handle.write('Original owner DN: %s\n' % original_dn)
        if original_dn in usersMap:
            handle.write('This DN maps to %s-%s\n' % (usersMap[original_dn][1], usersMap[original_dn][0]))
        else:
            handle.write('The original DN can not be found in the map file, skipping the workflow\n')
            continue
        params = {'reduce' : False,
                  'key' : [usersMap[original_dn][1], usersMap[original_dn][0], collection_name, fileset_name]}
        result = acdcDB.loadView('ACDC', 'owner_coll_fileset_docs', params)
        rows = result['rows']
        docIds = map(lambda x : x['id'], rows)
        handle.write('Found %d documents to change\n' % len(rows))
        handle.write('Changing from %s-%s to %s-%s\n' % (usersMap[original_dn][1], usersMap[original_dn][0],
                                                         workflow['group'], workflow['owner']))
        for docId in docIds:
            doc = acdcDB.document(docId)
            doc['owner'] = {'group' : workflow['group'], 'user' : workflow['owner']}
            if not options.dryRun:
                acdcDB.queue(doc)
        if not options.dryRun:
            response = acdcDB.commit()
        else:
            response = 'This is a dry-run no changes were made'
        handle.write('Response to write operation: %s\n' % str(response))
        handle.write('Response length: %d\n' % len(response))
        handle.write('=================================================================\n')
    handle.write('Finished script')
    handle.close()