This article collects typical usage examples of the Python class indico.modules.events.contributions.models.contributions.Contribution. If you are wondering what Contribution is for and how to use it, the examples below should help. The 13 code examples shown are excerpts from real projects, ordered roughly by popularity.
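All of the examples operate on this class and generally assume it has been imported from the module path given above, along these lines:

from indico.modules.events.contributions.models.contributions import Contribution

The snippets are excerpts from Indico itself (and from plugins such as indico_livesync), so the other helpers appearing in them (db, signals, joinedload, ...) come from their own modules and their imports are not repeated here.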
Example 1: create_contribution
def create_contribution(event, contrib_data, custom_fields_data=None, session_block=None, extend_parent=False):
    start_dt = contrib_data.pop('start_dt', None)
    contrib = Contribution(event_new=event)
    contrib.populate_from_dict(contrib_data)
    if start_dt is not None:
        schedule_contribution(contrib, start_dt=start_dt, session_block=session_block, extend_parent=extend_parent)
    if custom_fields_data:
        set_custom_fields(contrib, custom_fields_data)
    db.session.flush()
    signals.event.contribution_created.send(contrib)
    logger.info('Contribution %s created by %s', contrib, session.user)
    contrib.event_new.log(EventLogRealm.management, EventLogKind.positive, 'Contributions',
                          'Contribution "{}" has been created'.format(contrib.title), session.user)
    return contrib
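A minimal usage sketch of the helper above, assuming `event` is an already-loaded Event instance; the title, duration and start time are hypothetical, and the dictionary keys mirror the contrib_data dictionaries built in Example 9:

from datetime import timedelta

# `event` is assumed to exist; all field values below are made up for illustration
contrib = create_contribution(event,
                              {'title': 'Opening talk',
                               'duration': timedelta(minutes=20),
                               'start_dt': event.start_dt},
                              extend_parent=True)
print(contrib.friendly_id)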
Example 2: _migrate_event_reviewing
def _migrate_event_reviewing(self, conf):
    conference_settings = getattr(conf, '_confPaperReview', None)
    if not conference_settings:
        return
    event = conf.as_event
    contrib_index = conference_settings._contribution_index = IOBTree()
    contrib_reviewers = _invert_mapping(conference_settings._reviewerContribution)
    contrib_referees = _invert_mapping(conference_settings._refereeContribution)
    contrib_editors = _invert_mapping(conference_settings._editorContribution)
    for old_contribution in conf.contributions.itervalues():
        review_manager = getattr(old_contribution, '_reviewManager', None)
        new_contribution = Contribution.find_one(event_id=event.id, friendly_id=int(old_contribution.id))
        cid = int(new_contribution.id)
        if review_manager:
            review_manager._contrib_id = cid
            contrib_index[cid] = review_manager
        self._migrate_contribution_roles(old_contribution, new_contribution, contrib_reviewers,
                                         PaperReviewingRoleType.reviewer, event.id)
        self._migrate_contribution_roles(old_contribution, new_contribution, contrib_referees,
                                         PaperReviewingRoleType.referee, event.id)
        self._migrate_contribution_roles(old_contribution, new_contribution, contrib_editors,
                                         PaperReviewingRoleType.editor, event.id)
        self._migrate_reviewing_materials(old_contribution, new_contribution, review_manager, event.id)
Example 3: obj_deref
def obj_deref(ref):
    """Returns the object identified by `ref`"""
    from indico_livesync.models.queue import EntryType
    if ref['type'] == EntryType.category:
        return CategoryManager().getById(ref['category_id'], True)
    elif ref['type'] == EntryType.event:
        return Event.get(ref['event_id'])
    elif ref['type'] == EntryType.contribution:
        return Contribution.get(ref['contrib_id'])
    elif ref['type'] == EntryType.subcontribution:
        return SubContribution.get(ref['subcontrib_id'])
    else:
        raise ValueError('Unexpected object type: {}'.format(ref['type']))
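A usage sketch, assuming a livesync queue reference that points at a contribution (the ID is hypothetical):

from indico_livesync.models.queue import EntryType

ref = {'type': EntryType.contribution, 'contrib_id': 42}  # hypothetical reference
contrib = obj_deref(ref)  # resolves via Contribution.get(42)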
Example 4: _process_args
def _process_args(self):
    data = request.json
    self.object = None
    if 'categId' in data:
        self.object = Category.get_one(data['categId'])
    elif 'contribId' in data:
        self.object = Contribution.get_one(data['contribId'])
    elif 'sessionId' in data:
        self.object = Session.get_one(data['sessionId'])
    elif 'confId' in data:
        self.object = Event.get_one(data['confId'])
    if self.object is None:
        raise BadRequest
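As a sketch, a JSON request body like the following (with a hypothetical ID) would make this handler resolve self.object to a Contribution, while a payload without any of the recognized keys ends in BadRequest:

payload = {'contribId': 123}  # hypothetical request.json payload
# -> self.object = Contribution.get_one(123)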
Example 5: test_contrib_friendly_id
def test_contrib_friendly_id(monkeypatch, dummy_event, create_contribution):
    counter = Incrementer()
    monkeypatch.setattr(contrib_module, 'increment_and_get', counter)
    contrib_1 = create_contribution(dummy_event, 'Contribution 1', timedelta(minutes=60))
    assert contrib_1.friendly_id == 1
    contrib_2 = create_contribution(dummy_event, 'Contribution 2', timedelta(minutes=60))
    assert contrib_2.friendly_id == 2
    assert counter == 2
    # pre-allocate 8 friendly ids
    Contribution.allocate_friendly_ids(dummy_event, 8)
    assert g.friendly_ids[Contribution][dummy_event.id] == range(3, 11)
    assert counter == 10
    for fid in g.friendly_ids[Contribution][dummy_event.id][:]:
        contrib = create_contribution(dummy_event, 'Contribution {}'.format(fid), timedelta(minutes=30))
        assert contrib.friendly_id == fid
    # increment_and_get doesn't get called because the ids
    # have been pre-allocated
    assert counter == 10
Example 6: _process_cascaded_event_contents
def _process_cascaded_event_contents(records, additional_events=None):
    """
    Flatten a series of records into its most basic elements (subcontribution level).

    Yields results.

    :param records: queue records to process
    :param additional_events: events whose content will be included in addition to those
                              found in records
    """
    changed_events = additional_events or set()
    changed_contributions = set()
    changed_subcontributions = set()

    session_records = {rec.session_id for rec in records if rec.type == EntryType.session}
    contribution_records = {rec.contrib_id for rec in records if rec.type == EntryType.contribution}
    subcontribution_records = {rec.subcontrib_id for rec in records if rec.type == EntryType.subcontribution}
    event_records = {rec.event_id for rec in records if rec.type == EntryType.event}

    if event_records:
        changed_events.update(Event.find(Event.id.in_(event_records)))

    for event in changed_events:
        yield event

    # Sessions are added (explicitly changed only, since they don't need to be sent anywhere)
    if session_records:
        changed_contributions.update(Contribution
                                     .find(Contribution.session_id.in_(session_records), ~Contribution.is_deleted))

    # Contributions are added (implicitly + explicitly changed)
    changed_event_ids = {ev.id for ev in changed_events}
    condition = Contribution.event_id.in_(changed_event_ids) & ~Contribution.is_deleted
    if contribution_records:
        condition = db.or_(condition, Contribution.id.in_(contribution_records))
    contrib_query = Contribution.find(condition).options(joinedload('subcontributions'))

    for contribution in contrib_query:
        yield contribution
        changed_subcontributions.update(contribution.subcontributions)

    # Same for subcontributions
    if subcontribution_records:
        changed_subcontributions.update(SubContribution.find(SubContribution.id.in_(subcontribution_records)))

    for subcontrib in changed_subcontributions:
        yield subcontrib
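A consumption sketch, under the assumption that `records` holds the pending livesync queue entries and that `index` is a hypothetical callback (for example, pushing each object to an external search backend):

for obj in _process_cascaded_event_contents(records):
    # events are yielded first, then contributions, then subcontributions
    index(obj)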
Example 7: _handleGet
def _handleGet(self):
    contributions = Contribution.find(event_new=self._conf.as_event, is_deleted=False).options(
        joinedload("timetable_entry"), joinedload("paper_reviewing_roles")
    )
    filter = {}
    # filtering if the active user is a referee: he can only see his own contribs
    isOnlyReferee = (
        RCReferee.hasRights(self)
        and not RCPaperReviewManager.hasRights(self)
        and not self._conf.canModify(self.getAW())
    )
    # We want to make an 'or', not an 'and' of the reviewing assign status
    filter["reviewing"] = {}
    if isOnlyReferee:
        filter["reviewing"]["referee"] = self._getUser()
    elif self._showWithReferee:
        filter["reviewing"]["referee"] = "any"
    if self._showWithEditor:
        filter["reviewing"]["editor"] = "any"
    if self._showWithReviewer:
        filter["reviewing"]["reviewer"] = "any"
    filter["type"] = self._selTypes
    filter["track"] = self._selTracks
    filter["session"] = self._selSessions
    filter["materialsubmitted"] = self._showWithMaterial
    filterCrit = ContributionsReviewingFilterCrit(self._conf, filter)
    sortingCrit = contribFilters.SortingCriteria(["number"])
    filterCrit.getField("type").setShowNoValue(self._typeShowNoValue)
    filterCrit.getField("track").setShowNoValue(self._trackShowNoValue)
    filterCrit.getField("session").setShowNoValue(self._sessionShowNoValue)
    filterCrit.getField("reviewing").setShowNoValue(self._showWithoutTeam)
    filterCrit.getField("materialsubmitted").setShowNoValue(self._showWithoutMaterial)
    f = filters.SimpleFilter(filterCrit, sortingCrit)
    contributions = f.apply(contributions)
    return [_serialize_contribution(contrib) for contrib in contributions]
Example 8: _process_args
def _process_args(self):
    RHPapersBase._process_args(self)
    self.contribution = Contribution.get_one(request.view_args['contrib_id'], is_deleted=False)
    self.paper = self.contribution.paper
    if self.paper is None and self.PAPER_REQUIRED:
        raise NotFound
Example 9: import_contributions_from_csv
def import_contributions_from_csv(event, f):
    """Import timetable contributions from a CSV file into an event."""
    reader = csv.reader(f.read().splitlines())
    contrib_data = []
    for num_row, row in enumerate(reader, 1):
        try:
            start_dt, duration, title, first_name, last_name, affiliation, email = \
                [to_unicode(value).strip() for value in row]
            email = email.lower()
        except ValueError:
            raise UserValueError(_('Row {}: malformed CSV data - please check that the number of columns is correct')
                                 .format(num_row))
        try:
            parsed_start_dt = event.tzinfo.localize(dateutil.parser.parse(start_dt)) if start_dt else None
        except ValueError:
            raise UserValueError(_("Row {row}: can't parse date: \"{date}\"").format(row=num_row, date=start_dt))
        try:
            parsed_duration = timedelta(minutes=int(duration)) if duration else None
        except ValueError:
            raise UserValueError(_("Row {row}: can't parse duration: {duration}").format(row=num_row,
                                                                                         duration=duration))
        if not title:
            raise UserValueError(_("Row {}: contribution title is required").format(num_row))
        if email and not validate_email(email):
            raise UserValueError(_("Row {row}: invalid email address: {email}").format(row=num_row, email=email))
        contrib_data.append({
            'start_dt': parsed_start_dt,
            'duration': parsed_duration or timedelta(minutes=20),
            'title': title,
            'speaker': {
                'first_name': first_name,
                'last_name': last_name,
                'affiliation': affiliation,
                'email': email
            }
        })

    # now that we're sure the data is OK, let's pre-allocate the friendly ids
    # for the contributions in question
    Contribution.allocate_friendly_ids(event, len(contrib_data))
    contributions = []
    all_changes = defaultdict(list)
    for contrib_fields in contrib_data:
        speaker_data = contrib_fields.pop('speaker')
        with track_time_changes() as changes:
            contribution = create_contribution(event, contrib_fields, extend_parent=True)
        contributions.append(contribution)
        for key, val in changes[event].viewitems():
            all_changes[key].append(val)
        email = speaker_data['email']
        if not email:
            continue
        # set the information of the speaker
        person = get_event_person(event, {
            'firstName': speaker_data['first_name'],
            'familyName': speaker_data['last_name'],
            'affiliation': speaker_data['affiliation'],
            'email': email
        })
        link = ContributionPersonLink(person=person, is_speaker=True)
        link.populate_from_dict({
            'first_name': speaker_data['first_name'],
            'last_name': speaker_data['last_name'],
            'affiliation': speaker_data['affiliation']
        })
        contribution.person_links.append(link)
    return contributions, all_changes
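The expected CSV layout is: start date, duration in minutes, title, first name, last name, affiliation and email, one contribution per row. A usage sketch with a single hypothetical row, assuming `event` is an already-loaded Event:

from io import BytesIO

csv_content = b'2017-05-10 09:00,20,Opening talk,John,Doe,CERN,john.doe@example.com\n'
contributions, changes = import_contributions_from_csv(event, BytesIO(csv_content))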
Example 10: get_category_timetable
def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event', tz=utc, from_categ=None, grouped=True):
    """Retrieve time blocks that fall within a specific time interval
    for a given set of categories.

    :param categ_ids: iterable containing list of category IDs
    :param start_dt: start of search interval (``datetime``, expected
                     to be in display timezone)
    :param end_dt: end of search interval (``datetime``, expected
                   to be in display timezone)
    :param detail_level: the level of detail of information
                         (``event|session|contribution``)
    :param tz: the ``timezone`` information should be displayed in
    :param from_categ: ``Category`` that will be taken into account to calculate
                       visibility
    :param grouped: whether to group results by start date
    :returns: a dictionary containing timetable information in a
              structured way. See source code for examples.
    """
    day_start = start_dt.astimezone(utc)
    day_end = end_dt.astimezone(utc)
    dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)

    items = defaultdict(lambda: defaultdict(list))

    # first of all, query TimetableEntries/events that fall within
    # specified range of dates (and category set)
    events = _query_events(categ_ids, day_start, day_end)
    if from_categ:
        events = events.filter(Event.is_visible_in(from_categ))
    for eid, tt_start_dt in events:
        if tt_start_dt:
            items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)
        else:
            items[eid] = None

    # then, retrieve detailed information about the events
    event_ids = set(items)
    query = (Event.find(Event.id.in_(event_ids))
             .options(subqueryload(Event.person_links).joinedload(EventPersonLink.person),
                      joinedload(Event.own_room).noload('owner'),
                      joinedload(Event.own_venue),
                      joinedload(Event.category).undefer('effective_icon_data'),
                      undefer('effective_protection_mode')))
    scheduled_events = defaultdict(list)
    ongoing_events = []
    events = []
    for e in query:
        if grouped:
            local_start_dt = e.start_dt.astimezone(tz).date()
            local_end_dt = e.end_dt.astimezone(tz).date()
            if items[e.id] is None:
                # if there is no TimetableEntry, this means the event has no timetable in that interval
                for day in iterdays(max(start_dt.date(), local_start_dt), min(end_dt.date(), local_end_dt)):
                    # if the event starts on this date, we've got a time slot
                    if day.date() == local_start_dt:
                        scheduled_events[day.date()].append((e.start_dt, e))
                    else:
                        ongoing_events.append(e)
            else:
                for start_d, start_dts in items[e.id].viewitems():
                    scheduled_events[start_d].append((start_dts[0], e))
        else:
            events.append(e)

    # result['events'][date(...)] -> [(datetime(....), Event(...))]
    # result[event_id]['contribs'][date(...)] -> [(TimetableEntry(...), Contribution(...))]
    # result['ongoing_events'] = [Event(...)]
    if grouped:
        result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    else:
        result = defaultdict(lambda: defaultdict(list))
    result.update({
        'events': scheduled_events if grouped else events,
        'ongoing_events': ongoing_events
    })

    # according to detail level, ask for extra information from the DB
    if detail_level != 'event':
        query = _query_blocks(event_ids, dates_overlap, detail_level)
        if grouped:
            for b in query:
                start_date = b.timetable_entry.start_dt.astimezone(tz).date()
                result[b.session.event_id]['blocks'][start_date].append((b.timetable_entry, b))
        else:
            for b in query:
                result[b.session.event_id]['blocks'].append(b)

    if detail_level == 'contribution':
        query = (Contribution.find(Contribution.event_id.in_(event_ids),
                                   dates_overlap(TimetableEntry),
                                   ~Contribution.is_deleted)
                 .options(contains_eager(Contribution.timetable_entry),
                          joinedload(Contribution.person_links))
                 .join(TimetableEntry))
        if grouped:
            for c in query:
                start_date = c.timetable_entry.start_dt.astimezone(tz).date()
                result[c.event_id]['contribs'][start_date].append((c.timetable_entry, c))
        else:
            # ... (the rest of this example is omitted) ...
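A call sketch with hypothetical category IDs and date range; as stated in the docstring, the datetimes are expected in the display timezone:

from datetime import datetime
from pytz import timezone

display_tz = timezone('Europe/Zurich')
info = get_category_timetable([2, 3],  # hypothetical category IDs
                              display_tz.localize(datetime(2017, 5, 1)),
                              display_tz.localize(datetime(2017, 5, 31)),
                              detail_level='contribution', tz=display_tz)
ongoing = info['ongoing_events']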
Example 11: _process_args
def _process_args(self):
    RHDisplayEventBase._process_args(self)
    self.contrib = Contribution.get_one(request.view_args['contrib_id'], is_deleted=False)
Example 12: _process_args
def _process_args(self):
    RHManageContributionsBase._process_args(self)
    self.contrib = Contribution.find_one(id=request.view_args['contrib_id'], is_deleted=False)
Example 13: contribution
def contribution(self):
    return Contribution.get(self._contrib_id)