本文整理汇总了Python中vizgrimoire.GrimoireUtils.createJSON函数的典型用法代码示例。如果您正苦于以下问题:Python createJSON函数的具体用法?Python createJSON怎么用?Python createJSON使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了createJSON函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_filter_report_top
def create_filter_report_top(filter_, period, startdate, enddate, destdir, npeople, identities_db):
    """Generate the "top contributors" JSON report for one filter.

    Writes the list of filter items and, for the repository filter only,
    a top-authors JSON file per item into ``destdir``.

    :param filter_: Filter object (e.g. repository, company)
    :param period: time period granularity (unused here, kept for API parity)
    :param startdate: analysis start date (SQL-quoted string)
    :param enddate: analysis end date (SQL-quoted string)
    :param destdir: output directory for the JSON files
    :param npeople: number of people to include in the top lists
    :param identities_db: name of the identities database
    """
    from vizgrimoire.report import Report
    items = Report.get_items()
    if items is None:
        items = EventsDS.get_filter_items(filter_, startdate, enddate, identities_db)
        # Fixed: use identity comparison with None instead of `== None`.
        if items is None:
            return
        items = items['name']

    filter_name = filter_.get_name()

    if not isinstance(items, list):
        items = [items]

    fn = os.path.join(destdir, filter_.get_filename(EventsDS()))
    createJSON(items, fn)

    for item in items:
        item_name = "'" + item + "'"
        logging.info(item_name)
        filter_item = Filter(filter_name, item)

        # Fixed: the original tested `filter_name in ("repository")`, which is
        # a SUBSTRING check against the string "repository" (the parentheses do
        # not make a tuple), so e.g. "repo" would also match. Equality is the
        # intended semantics.
        if filter_name == "repository":
            logging.warn("Filter 'repository' detected for top info")
            top_authors = EventsDS.get_top_data(startdate, enddate, identities_db, filter_item, npeople)
            logging.warn(filter_item.get_top_filename(EventsDS()))
            fn = os.path.join(destdir, filter_item.get_top_filename(EventsDS()))
            createJSON(top_authors, fn)
示例2: create_people_identifiers
def create_people_identifiers(startdate, enddate, destdir, npeople, identities_db):
    """Build and persist identifiers for all top people across data sources.

    Collects the unique people ids appearing in any "top" metric of any data
    source (for the total, last-year and last-month periods), resolves their
    identifiers and writes them to ``destdir``/people.json.

    :return: list of unique people ids that needed identifiers
    """
    from vizgrimoire.GrimoireUtils import check_array_values
    logging.info("Generating people identifiers")
    people = get_top_report(startdate, enddate, npeople, identities_db, only_people=True)
    people_ids = []   # upeople_ids which need identifiers
    people_data = {}  # identifiers for upeople_ids

    # Hoisted: `periods` is loop-invariant; the original rebuilt it for every
    # data source. Also cache the per-data-source top dict to avoid repeating
    # the same nested lookups three times per metric.
    periods = [".", ".last year", ".last month"]
    for ds in Report.get_data_sources():
        ds_tops = people[ds.get_name()]
        for period in periods:
            for top_name in ds.get_top_metrics():
                key = top_name + period
                if key in ds_tops and 'id' in ds_tops[key]:
                    people_ids += check_array_values(ds_tops[key])['id']

    people_ids = list(set(people_ids))

    import vizgrimoire.People as People
    for upeople_id in people_ids:
        people_data[upeople_id] = People.GetPersonIdentifiers(identities_db, upeople_id)

    # Also include the minimal top set per data source.
    all_top_min_ds = get_top_people(startdate, enddate, identities_db)
    for upeople_id in all_top_min_ds:
        people_data[upeople_id] = People.GetPersonIdentifiers(identities_db, upeople_id)

    createJSON(people_data, destdir + "/people.json")
    return people_ids
示例3: test_get_filter_items
def test_get_filter_items(self):
    """Check that filter items from every data source serialize to JSON
    matching the reference files shipped under json/.

    NOTE(review): `startdate` and `enddate` are read from module globals;
    scr/pullpo repository naming is validated elsewhere
    (test_get_agg_evol_filters_data), so only the JSON write is done here.
    """
    opts = read_options()
    automator = read_main_conf(opts.config_file)
    identities_db = automator['generic']['db_identities']

    for ds in Report.get_data_sources():
        Report.connect_ds(ds)
        ds_name = ds.get_name()
        for filter_ in Report.get_filters():
            items = ds.get_filter_items(filter_, startdate, enddate, identities_db)
            if items is None:
                continue
            if isinstance(items, dict):
                items = items['name']
            if not isinstance(items, list):
                items = [items]

            # Normalize item names so they are filesystem-safe.
            if ds_name in ["scr", "pullpo"]:
                items = [item.replace("/", "_") for item in items]
            elif ds_name == "mls":
                items = [item.replace("/", "_").replace("<", "__").replace(">", "___")
                         for item in items]

            fn = ds_name + "-" + filter_.get_name_plural() + ".json"
            new_json = opts.destdir + "/" + fn
            createJSON(items, new_json)
            test_json = os.path.join("json", fn)

            if ds_name not in ["scr", "pullpo"]:
                # scr, pullpo repos format is more complex and
                # is checked already in test_get_agg_evol_filters_data
                self.assertTrue(self.compareJSON(test_json, new_json))
示例4: create_filter_report_top
def create_filter_report_top(filter_, period, startdate, enddate, destdir, npeople, identities_db):
    """Generate the "top mergers" JSON report per filter item for SCR.

    Writes the filter-items JSON plus, for company/project/repository
    filters, one top-data JSON file per item into ``destdir``.

    :param filter_: Filter object describing the filter dimension
    :param period: time period granularity (unused here, kept for API parity)
    :param startdate: analysis start date (SQL-quoted string)
    :param enddate: analysis end date (SQL-quoted string)
    :param destdir: output directory for the JSON files
    :param npeople: number of people in the top lists
    :param identities_db: name of the identities database
    """
    from vizgrimoire.report import Report
    items = Report.get_items()
    if items is None:
        items = SCR.get_filter_items(filter_, startdate, enddate, identities_db)
        # Fixed: use identity comparison with None instead of `== None`.
        if items is None:
            return
        items = items['name']

    filter_name = filter_.get_name()

    if not isinstance(items, list):
        items = [items]

    fn = os.path.join(destdir, filter_.get_filename(SCR()))
    createJSON(items, fn)

    for item in items:
        item_name = "'" + item + "'"
        logging.info(item_name)
        filter_item = Filter(filter_name, item)

        if filter_name in ("company", "project", "repository"):
            top_mergers = SCR.get_top_data(startdate, enddate, identities_db, filter_item, npeople)
            fn = os.path.join(destdir, filter_item.get_top_filename(SCR()))
            createJSON(top_mergers, fn)
示例5: create_filter_report_all
def create_filter_report_all(cls, filter_, period, startdate, enddate, destdir, npeople, identities_db):
    """Create the aggregated and evolutionary "all items" reports for a filter.

    Only filters supported by the group-by-items SQL queries are handled;
    anything else is logged as an error. Results are also exploded into
    per-item files via ITS.convert_all_to_single.
    """
    check = False  # activate to debug issues
    # Change filter to GrimoireLib notation.
    filter_name = filter_.get_name().replace("+", MetricFilters.DELIMITER)

    supported = [
        "people2", "company", "country", "repository", "domain", "project",
        "company" + MetricFilters.DELIMITER + "country",
        "company" + MetricFilters.DELIMITER + "project",
    ]
    if filter_name not in supported:
        logging.error(filter_name + " does not support yet group by items sql queries")
        return

    filter_all = Filter(filter_name, None)

    # Aggregated (static) data for all items at once.
    agg_all = cls.get_agg_data(period, startdate, enddate,
                               identities_db, filter_all)
    fn = os.path.join(destdir, filter_.get_static_filename_all(cls()))
    createJSON(agg_all, fn)
    ITS.convert_all_to_single(agg_all, filter_, destdir, False, period)

    # Evolutionary (time series) data for all items at once.
    evol_all = cls.get_evolutionary_data(period, startdate, enddate,
                                         identities_db, filter_all)
    fn = os.path.join(destdir, filter_.get_evolutionary_filename_all(cls()))
    createJSON(evol_all, fn)
    ITS.convert_all_to_single(evol_all, filter_, destdir, True, period)

    if check:
        cls._check_report_all_data(evol_all, filter_, startdate, enddate,
                                   identities_db, True, period)
        cls._check_report_all_data(agg_all, filter_, startdate, enddate,
                                   identities_db, False, period)
示例6: scm_report
def scm_report(dbcon, filters, output_dir):
    """Compute basic activity and community metrics for source code
    management systems and write the top-authors artifacts.

    :param dbcon: database connection object
    :param filters: MetricFilters with the period/date range
    :param output_dir: directory prefix for the generated JSON/CSV files
                       (note: joined by plain concatenation — presumably it
                       ends with "/"; TODO confirm against callers)
    :return: dict with scm_* scalar metrics
    """
    dataset = {}

    # Onion model: core / regular / occasional contributor split.
    from vizgrimoire.analysis.onion_model import CommunityStructure
    onion = CommunityStructure(dbcon, filters)
    result = onion.result()
    dataset["scm_core"] = result["core"]
    dataset["scm_regular"] = result["regular"]
    dataset["scm_occasional"] = result["occasional"]

    authors_period = scm.AuthorsPeriod(dbcon, filters)
    dataset["scm_authorsperiod"] = float(authors_period.get_agg()["avg_authors_month"])

    authors = scm.Authors(dbcon, filters)
    top_authors = authors.get_list()
    createJSON(top_authors, output_dir + "scm_top_authors.json")
    createCSV(top_authors, output_dir + "scm_top_authors.csv")

    commits = scm.Commits(dbcon, filters)
    dataset["scm_commits"] = commits.get_agg()["commits"]

    # Fixed: the original instantiated scm.Authors a second time here with the
    # same arguments; reuse the instance created above.
    dataset["scm_authors"] = authors.get_agg()["authors"]

    # TODO: company metrics not generated yet
    #companies = scm.Companies(dbcon, filters)
    #top_companies = companies.get_list(filters)

    return dataset
示例7: mls_report
def mls_report(dbcon, filters, output_dir):
    """Compute mailing-list metrics and write the top-threads artifacts.

    :param dbcon: database connection object
    :param filters: MetricFilters with the period/date range
    :param output_dir: directory where JSON/CSV files are written
    :return: dict with mls_* scalar metrics
    """
    dataset = {
        "mls_sent": mls.EmailsSent(dbcon, filters).get_agg()["sent"],
        "mls_senders": mls.EmailsSenders(dbcon, filters).get_agg()["senders"],
        "mls_senders_init": mls.SendersInit(dbcon, filters).get_agg()["senders_init"],
    }

    # Thread analysis needs its own DB channel.
    from vizgrimoire.analysis.threads import Threads
    SetDBChannel(dbcon.user, dbcon.password, dbcon.database)
    threads = Threads(filters.startdate, filters.enddate, dbcon.identities_db)

    longest = serialize_threads(threads.topLongestThread(10), False, threads)
    createJSON(longest, output_dir + "/mls_top_longest_threads.json")
    createCSV(longest, output_dir + "/mls_top_longest_threads.csv")

    crowded = serialize_threads(threads.topCrowdedThread(10), True, threads)
    createJSON(crowded, output_dir + "/mls_top_crowded_threads.json")
    createCSV(crowded, output_dir + "/mls_top_crowded_threads.csv")

    return dataset
示例8: create_filter_report_all
def create_filter_report_all(filter_, period, startdate, enddate, destdir, npeople, identities_db):
    """Create MLS aggregated and evolutionary "all items" reports for a filter.

    Top-by-filter data is always generated (it is not supported by the
    group-all queries); the combined agg/evol reports are generated only for
    the filters the group-by-items SQL supports.
    """
    check = False  # activate to debug issues
    filter_name = filter_.get_name()

    # top by filter, not supported by group all queries
    MLS.create_filter_report_top(filter_, period, startdate, enddate, destdir, npeople, identities_db)

    supported = ["people2", "company", "repository", "country", "domain", "project"]
    if filter_name not in supported:
        logging.error(filter_name + " does not support yet group by items sql queries")
        return

    filter_all = Filter(filter_name, None)

    agg_all = MLS.get_agg_data(period, startdate, enddate,
                               identities_db, filter_all)
    fn = os.path.join(destdir, filter_.get_static_filename_all(MLS()))
    createJSON(agg_all, fn)
    MLS.convert_all_to_single(agg_all, filter_, destdir, False, period)

    evol_all = MLS.get_evolutionary_data(period, startdate, enddate,
                                         identities_db, filter_all)
    fn = os.path.join(destdir, filter_.get_evolutionary_filename_all(MLS()))
    createJSON(evol_all, fn)
    MLS.convert_all_to_single(evol_all, filter_, destdir, True, period)

    if check:
        MLS._check_report_all_data(evol_all, filter_, startdate, enddate,
                                   identities_db, True, period)
        MLS._check_report_all_data(agg_all, filter_, startdate, enddate,
                                   identities_db, False, period)
示例9: mls_report
def mls_report(dbcon, filters):
    """Per-release mailing-list report: write ./release/ JSON/CSV files and
    return the summary metrics dict.

    :param dbcon: database connection object
    :param filters: MetricFilters with the period/date range
    :return: dict with sent / senders / senders_init counts
    """
    # Cache each get_agg() result: the original called every metric's
    # get_agg() twice (once for the JSON, once for the dataset), which
    # presumably re-runs the underlying query each time.
    emails_agg = mls.EmailsSent(dbcon, filters).get_agg()
    createJSON(emails_agg, "./release/mls_emailssent.json")

    senders_agg = mls.EmailsSenders(dbcon, filters).get_agg()
    createJSON(senders_agg, "./release/mls_emailssenders.json")

    senders_init_agg = mls.SendersInit(dbcon, filters).get_agg()
    createJSON(senders_init_agg, "./release/mls_sendersinit.json")

    dataset = {}
    dataset["sent"] = emails_agg["sent"]
    dataset["senders"] = senders_agg["senders"]
    dataset["senders_init"] = senders_init_agg["senders_init"]

    # Thread analysis needs its own DB channel.
    from vizgrimoire.analysis.threads import Threads
    SetDBChannel(dbcon.user, dbcon.password, dbcon.database)
    threads = Threads(filters.startdate, filters.enddate, dbcon.identities_db)

    top_longest_threads = threads.topLongestThread(10)
    top_longest_threads = serialize_threads(top_longest_threads, False, threads)
    createJSON(top_longest_threads, "./release/mls_top_longest_threads.json")
    createCSV(top_longest_threads, "./release/mls_top_longest_threads.csv")

    top_crowded_threads = threads.topCrowdedThread(10)
    top_crowded_threads = serialize_threads(top_crowded_threads, True, threads)
    createJSON(top_crowded_threads, "./release/mls_top_crowded_threads.json")
    createCSV(top_crowded_threads, "./release/mls_top_crowded_threads.csv")

    return dataset
示例10: result
def result(self, data_source, destdir = None):
    """Produce the SCR new/gone contributors study and people-intake series.

    Writes several JSON files into ``destdir``; only runs for the SCR data
    source. Returns None.
    """
    from vizgrimoire.SCR import SCR
    if data_source != SCR or destdir is None:
        return

    period = self.filters.period
    startdate = self.filters.startdate
    enddate = self.filters.enddate

    # Contributors that recently joined.
    code_contrib = {}
    code_contrib["submitters"] = self.GetNewSubmitters()
    code_contrib["mergers"] = self.GetNewMergers()
    code_contrib["abandoners"] = self.GetNewAbandoners()
    createJSON(code_contrib, destdir + "/scr-code-contrib-new.json")

    # Contributors that left.
    code_contrib = {}
    code_contrib["submitters"] = self.GetGoneSubmitters()
    code_contrib["mergers"] = self.GetGoneMergers()
    code_contrib["abandoners"] = self.GetGoneAbandoners()
    createJSON(code_contrib, destdir + "/scr-code-contrib-gone.json")

    data = self.GetNewSubmittersActivity()
    evol = {}
    evol['people'] = {}
    for uuid in data['uuid']:
        pdata = self.db.GetPeopleEvolSubmissionsSCR(uuid, period, startdate, enddate)
        pdata = completePeriodIds(pdata, period, startdate, enddate)
        evol['people'][uuid] = {"submissions": pdata['submissions']}
        # Just to have the time series data (period/id/date axes) at the top
        # level. Fixed: the original used dict(evol.items() + pdata.items()),
        # which is Python-2-only (dict views cannot be added in Python 3);
        # copy+update has the same "pdata overrides evol" semantics.
        merged = dict(evol)
        merged.update(pdata)
        evol = merged
    if 'changes' in evol:
        del evol['changes']  # closed (metrics) is included in people
    createJSON(evol, destdir + "/new-people-activity-scr-evolutionary.json")

    data = self.GetGoneSubmittersActivity()
    evol = {}
    evol['people'] = {}
    for uuid in data['uuid']:
        pdata = self.db.GetPeopleEvolSubmissionsSCR(uuid, period, startdate, enddate)
        pdata = completePeriodIds(pdata, period, startdate, enddate)
        evol['people'][uuid] = {"submissions": pdata['submissions']}
        # Just to have the time series data (see note above in this function).
        merged = dict(evol)
        merged.update(pdata)
        evol = merged
    if 'changes' in evol:
        del evol['changes']  # closed (metrics) is included in people
    createJSON(evol, destdir + "/gone-people-activity-scr-evolutionary.json")

    # data = GetPeopleLeaving()
    # createJSON(data, destdir+"/leaving-people-scr.json")

    # People intake: how many people fall in each submission-count bucket.
    evol = {}
    data = completePeriodIds(self.db.GetPeopleIntake(0, 1), period, startdate, enddate)
    evol[period] = data[period]
    evol['id'] = data['id']
    evol['date'] = data['date']
    evol['num_people_1'] = data['people']
    evol['num_people_1_5'] = completePeriodIds(self.db.GetPeopleIntake(1, 5), period, startdate, enddate)['people']
    evol['num_people_5_10'] = completePeriodIds(self.db.GetPeopleIntake(5, 10), period, startdate, enddate)['people']
    createJSON(evol, destdir + "/scr-people-intake-evolutionary.json")
示例11: create_filter_report
def create_filter_report(cls, filter_, period, startdate, enddate, destdir, npeople, identities_db):
    """Create the per-item evolutionary, static and top JSON reports for a
    filter, plus a summary items list (with closed/closers counts for the
    domain/company/repository filters).

    :param filter_: Filter object describing the filter dimension
    :param period: time period granularity for the time series
    :param startdate: analysis start date (SQL-quoted string)
    :param enddate: analysis end date (SQL-quoted string)
    :param destdir: output directory for the JSON files
    :param npeople: number of people in the top lists
    :param identities_db: name of the identities database
    """
    from vizgrimoire.report import Report
    items = Report.get_items()
    if items is None:
        items = cls.get_filter_items(filter_, startdate, enddate, identities_db)
        # Fixed: use identity comparison with None instead of `== None`.
        if items is None:
            return
        items = items['name']

    filter_name = filter_.get_name()

    if not isinstance(items, list):
        items = [items]

    fn = os.path.join(destdir, filter_.get_filename(cls()))
    createJSON(items, fn)

    # For these filters the summary file carries yearly closed/closers data.
    if filter_name in ("domain", "company", "repository"):
        items_list = {'name': [], 'closed_365': [], 'closers_365': []}
    else:
        items_list = items

    for item in items:
        item_name = "'" + item + "'"
        logging.info(item_name)
        filter_item = Filter(filter_name, item)

        evol_data = cls.get_evolutionary_data(period, startdate, enddate, identities_db, filter_item)
        fn = os.path.join(destdir, filter_item.get_evolutionary_filename(cls()))
        createJSON(evol_data, fn)

        agg = cls.get_agg_data(period, startdate, enddate, identities_db, filter_item)
        fn = os.path.join(destdir, filter_item.get_static_filename(cls()))
        createJSON(agg, fn)

        if filter_name in ["domain", "company", "repository"]:
            items_list['name'].append(item.replace('/', '_'))
            items_list['closed_365'].append(agg['closed_365'])
            items_list['closers_365'].append(agg['closers_365'])

        if filter_name in ["company", "domain", "repository"]:
            top = cls.get_top_data(startdate, enddate, identities_db, filter_item, npeople)
            fn = os.path.join(destdir, filter_item.get_top_filename(cls()))
            createJSON(top, fn)

    # Overwrite the items file with the enriched summary list.
    fn = os.path.join(destdir, filter_.get_filename(cls()))
    createJSON(items_list, fn)

    if filter_name == "company":
        ds = ITS
        #summary = cls.get_filter_summary(
        #    filter_, period, startdate, enddate,
        #    identities_db, 10
        #    )
        #createJSON (summary,
        #            destdir + "/" + filter_.get_summary_filename(cls))
        # Perform ages study, if it is specified in Report
        cls.ages_study_com(items, period, startdate, enddate, destdir)
示例12: result
def result(self, data_source = None, destdir = None):
    """Report the oldest un-reviewed Gerrit changesets grouped by the
    submitter's organization, keeping at most 10 per organization.

    Writes the result to scr-oldest_changesets_by_affiliation.json in
    ``destdir`` (the unfiltered data is written first and then overwritten
    by the per-organization-capped version) and returns the capped data as
    a dict of lists.
    """
    fields = Set([])
    tables = Set([])
    filters = Set([])

    fields.add("tr.url as project_name")
    fields.add("pro.name as author_name")
    fields.add("org.name as organization")
    fields.add("i.issue as gerrit_issue_id")
    fields.add("i.summary as summary")
    fields.add("i.submitted_on as first_upload")
    fields.add("t.last_upload as last_upload")

    tables.add("issues i")
    tables.add("trackers tr")
    tables.add("people_uidentities puid")
    tables.add(self.db.identities_db + ".enrollments enr")
    tables.add(self.db.identities_db + ".organizations org")
    tables.add(self.db.identities_db + ".profiles pro")
    # Last patchset upload per issue.
    tables.add("(select issue_id, max(changed_on) as last_upload from changes where field='status' and new_value='UPLOADED' group by issue_id) t")

    filters.add("t.issue_id = i.id")
    # Only changesets that never received a Code-Review.
    filters.add("i.id not in (select distinct(issue_id) from changes where field='Code-Review')")
    filters.add("i.status<>'Abandoned'")
    filters.add("i.status<>'Merged'")
    filters.add("tr.id=i.tracker_id")
    filters.add("i.submitted_by=puid.people_id")
    filters.add("puid.uuid = enr.uuid")
    # Affiliation valid at submission time.
    filters.add("i.submitted_on >= enr.start")
    filters.add("i.submitted_on < enr.end")
    filters.add("enr.organization_id = org.id")
    filters.add("puid.uuid = pro.uuid")
    filters.add("i.summary not like '%WIP%'")

    query = " select " + self.db._get_fields_query(fields)
    query = query + " from " + self.db._get_tables_query(tables)
    query = query + " where " + self.db._get_filters_query(filters)
    query = query + " order by org.name, t.last_upload"
    data = self.db.ExecuteQuery(query)

    # TODO: Hardcoded creation of file
    createJSON(data, destdir + "/scr-oldest_changesets_by_affiliation.json")

    # Filtering the data to have only 10 entries per organization at most.
    data_df = pandas.DataFrame(data, columns=["gerrit_issue_id", "project_name", "organization", "last_upload", "first_upload", "summary", "author_name"])
    organizations = pandas.unique(data_df.organization)
    dataframes = []
    for organization in organizations:
        # Fixed: the original sliced [1:10], which silently dropped each
        # organization's first (oldest) changeset and kept at most 9 rows,
        # contradicting the stated "10 entries per organization at most".
        dataframes.append(data_df[data_df["organization"] == organization][:10])
    filter_orgs = pandas.concat(dataframes)
    filter_orgs = filter_orgs.to_dict(orient="list")

    createJSON(filter_orgs, destdir + "/scr-oldest_changesets_by_affiliation.json")
    return filter_orgs
示例13: _compare_data
def _compare_data(self, data, json_file):
    """Serialize ``data`` to a temporary JSON file and compare it against
    ``json_file``; the temporary file is removed only when they match
    (left behind for inspection otherwise).

    :return: True when the serialized data equals the reference file
    """
    # Create a temporary JSON file with data
    from tempfile import NamedTemporaryFile
    tmp = NamedTemporaryFile()
    tmp_name = tmp.name
    tmp.close()

    createJSON(data, tmp_name, check=False, skip_fields=[])
    matches = self.compareJSON(json_file, tmp_name)
    if matches:
        os.remove(tmp_name)
    return matches
示例14: scm_report
def scm_report(dbcon, filters):
    """Per-release SCM report: write ./release/ JSON/CSV files for commits,
    authors and companies of one project and return the summary dict.

    :param dbcon: database connection object
    :param filters: MetricFilters; filters.type_analysis[1] is the project name
    :return: dict with commits / authors counts
    """
    project_name = filters.type_analysis[1]
    project_name = project_name.replace(" ", "")

    # Cache each get_agg() result: the original called get_agg() twice per
    # metric (once for the JSON, once for the dataset), which presumably
    # re-runs the underlying query each time.
    commits = scm.Commits(dbcon, filters)
    commits_agg = commits.get_agg()
    createJSON(commits_agg, "./release/scm_commits_" + project_name + ".json")

    authors = scm.Authors(dbcon, filters)
    authors_agg = authors.get_agg()
    createJSON(authors_agg, "./release/scm_authors_" + project_name + ".json")

    dataset = {}
    dataset["commits"] = commits_agg["commits"]
    dataset["authors"] = authors_agg["authors"]

    # Top authors activity. A single-row result comes back as scalars, so
    # normalize every column to a list before serializing.
    top_authors = authors.get_list()
    if not isinstance(top_authors["commits"], list):
        top_authors["commits"] = [top_authors["commits"]]
        top_authors["id"] = [top_authors["id"]]
        top_authors["authors"] = [top_authors["authors"]]
    createJSON(top_authors, "./release/scm_top_authors_project_" + project_name + ".json")
    createCSV(top_authors, "./release/scm_top_authors_project_" + project_name + ".csv", ["id"])

    # Top companies activity, normalized the same way.
    companies = scm.Companies(dbcon, filters)
    top_companies = companies.get_list(filters)
    if not isinstance(top_companies["company_commits"], list):
        top_companies["company_commits"] = [top_companies["company_commits"]]
        top_companies["companies"] = [top_companies["name"]]
    createJSON(top_companies, "./release/scm_top_companies_project_" + project_name + ".json")
    createCSV(top_companies, "./release/scm_top_companies_project_" + project_name + ".csv")

    return dataset
示例15: create_filter_report_all
def create_filter_report_all(filter_, period, startdate, enddate, destdir, npeople, identities_db):
    """Create Mediawiki aggregated and evolutionary "all items" reports.

    Only the people2 and company filters are supported by the group-by-items
    SQL queries; other filters are logged as errors.
    """
    filter_name = filter_.get_name()

    if filter_name not in ("people2", "company"):
        logging.error(Mediawiki.get_name() + " " + filter_name + " does not support yet group by items sql queries")
        return

    filter_all = Filter(filter_name, None)

    agg_all = Mediawiki.get_agg_data(period, startdate, enddate,
                                     identities_db, filter_all)
    fn = os.path.join(destdir, filter_.get_static_filename_all(Mediawiki()))
    createJSON(agg_all, fn)

    evol_all = Mediawiki.get_evolutionary_data(period, startdate, enddate,
                                               identities_db, filter_all)
    fn = os.path.join(destdir, filter_.get_evolutionary_filename_all(Mediawiki()))
    createJSON(evol_all, fn)