Python expression.tuple_ Function Code Examples

This article collects and summarizes typical usage examples of the sqlalchemy.sql.expression.tuple_ function in Python. If you have been wondering what the tuple_ function does, how to use it, or what real-world usage looks like, the curated code examples below should be helpful.


A total of 15 code examples of the tuple_ function are shown below, sorted by popularity by default.
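Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what tuple_ does: it groups several columns into a single composite SQL expression, most often combined with .in_() to filter on several columns at once. The Revision model, the sample data, and the in-memory SQLite backend are hypothetical choices made only for illustration; the sketch assumes SQLAlchemy 1.4+ and a backend with row-value support (MySQL, PostgreSQL, or SQLite 3.15+).

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql.expression import tuple_

Base = declarative_base()


class Revision(Base):
    """Hypothetical model used only to illustrate tuple_()."""
    __tablename__ = "revisions"
    id = Column(Integer, primary_key=True)
    resource_type = Column(String(50))
    resource_id = Column(Integer)


engine = create_engine("sqlite://")  # any backend with row-value support works
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([
        Revision(resource_type="Control", resource_id=1),
        Revision(resource_type="Policy", resource_id=7),
        Revision(resource_type="Policy", resource_id=8),
    ])
    session.commit()

    pairs = [("Control", 1), ("Policy", 7)]
    # tuple_() wraps the two columns into one composite expression, so .in_()
    # compares both columns at once:
    #   WHERE (resource_type, resource_id) IN (('Control', 1), ('Policy', 7))
    query = session.query(Revision).filter(
        tuple_(Revision.resource_type, Revision.resource_id).in_(pairs)
    )
    print(sorted((r.resource_type, r.resource_id) for r in query))
    # [('Control', 1), ('Policy', 7)]

Most of the examples below follow exactly this pattern: build a list of key tuples in Python, then filter a query with tuple_(<columns>).in_(<tuples>) to fetch all matching rows in a single statement instead of issuing one query per key.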

Example 1: _populate_cache

 def _populate_cache(self, stubs):
   """Fetch all mappings for objects in stubs, cache them in self.cache."""
   # The union is here to convince MySQL to use two separate indices and
   # merge the results. Just using `or` results in a full-table scan.
   # The manual column list avoids loading the full object, which would also
   # try to load related objects.
   cols = db.session.query(
       Relationship.source_type, Relationship.source_id,
       Relationship.destination_type, Relationship.destination_id)
   relationships = cols.filter(
       tuple_(Relationship.source_type, Relationship.source_id).in_(
           [(s.type, s.id) for s in stubs]
       )
   ).union_all(
       cols.filter(
           tuple_(Relationship.destination_type,
                  Relationship.destination_id).in_(
                      [(s.type, s.id) for s in stubs]))
   ).all()
   for (src_type, src_id, dst_type, dst_id) in relationships:
     src = Stub(src_type, src_id)
     dst = Stub(dst_type, dst_id)
     # Only store a neighbor if we queried for it; this way we know we'll be
     # storing the complete neighborhood by the end of the loop.
     if src in stubs:
       self.cache[src].add(dst)
     if dst in stubs:
       self.cache[dst].add(src)
Developer: Smotko, Project: ggrc-core, Lines: 28, Source: __init__.py

Example 2: _set_latest_revisions

def _set_latest_revisions(objects):
  """Set latest revision_id for given child_type.

  Args:
    objects: list of snapshot objects with child_id and child_type set.
  """
  pairs = [(o.child_type, o.child_id) for o in objects]
  query = db.session.query(
      func.max(revision.Revision.id, name="id", identifier="id"),
      revision.Revision.resource_type,
      revision.Revision.resource_id,
  ).filter(
      tuple_(
          revision.Revision.resource_type,
          revision.Revision.resource_id,
      ).in_(pairs)
  ).group_by(
      revision.Revision.resource_type,
      revision.Revision.resource_id,
  )
  id_map = {(r_type, r_id): id_ for id_, r_type, r_id in query}
  for o in objects:
    o.revision_id = id_map.get((o.child_type, o.child_id))
    if o.revision_id is None:
      raise exceptions.InternalServerError(errors.MISSING_REVISION)
Developer: google, Project: ggrc-core, Lines: 25, Source: snapshot.py

Example 3: yield_records_to_be_deleted

def yield_records_to_be_deleted(prod_conn, table_name, records_marked_for_deletion, batch_size=100):
    """Yield records to  marked as deleted from pre-prod table


    The methods yields records marked for delete('W') from the pre-prod database table

    :param prod_conn: connection object to prod database
    :param table_name: name of the table as string
    :param records_marked_for_deletion: records from pre-prod marked for deletion (rec_status: 'W')
    :param batch_size: batch size to yield results

    @return: Yields records from prod in size of batch_size
    """
    table = prod_conn.get_table(table_name)
    natural_keys = get_natural_key_columns(table)
    columns_to_select = [table.c[column_name] for column_name in get_columns_names_to_pick_for_delete(table)]
    key_columns = [table.columns[key] for key in natural_keys]
    key_values = [[row[key] for key in natural_keys] for row in records_marked_for_deletion]
    query = select(columns_to_select, from_obj=table).where(and_(table.c.rec_status == Constants.STATUS_CURRENT,
                                                                 tuple_(*key_columns).in_(key_values)))
    result = prod_conn.execute(query, stream_results=True)
    rows = result.fetchmany(batch_size)
    while len(rows) > 0:
        yield rows
        rows = result.fetchmany(batch_size)
Developer: SmarterApp, Project: RDW_DataWarehouse, Lines: 25, Source: move_to_target.py

Example 4: get_snapshots

def get_snapshots(objects=None, ids=None):
  with benchmark("snapshotter.helpers.get_snapshots"):
    if objects and ids:
      raise Exception(
          "Insert only iterable of (parent, child) tuples or set of IDS")
    columns = db.session.query(
        models.Snapshot.id,
        models.Snapshot.context_id,
        models.Snapshot.created_at,
        models.Snapshot.updated_at,
        models.Snapshot.parent_type,
        models.Snapshot.parent_id,
        models.Snapshot.child_type,
        models.Snapshot.child_id,
        models.Snapshot.revision_id,
        models.Snapshot.modified_by_id,
    )
    if objects:
      return columns.filter(
          tuple_(
              models.Snapshot.parent_type,
              models.Snapshot.parent_id,
              models.Snapshot.child_type,
              models.Snapshot.child_id
          ).in_({(parent.type, parent.id, child.type, child.id)
                 for parent, child in objects}))
    if ids:
      return columns.filter(
          models.Snapshot.id.in_(ids))
    return set()
Developer: VinnieJohns, Project: ggrc-core, Lines: 30, Source: helpers.py

Example 5: related

  def related(self, obj):
    if obj in self.cache:
      return self.cache[obj]
    # Pre-fetch the neighborhood for enqueued objects since we're going to need
    # those results in a few steps. This drastically reduces the number of queries.
    stubs = {s for rel in self.queue for s in rel}
    stubs.add(obj)
    # The union is here to convince MySQL to use two separate indices and
    # merge the results. Just using `or` results in a full-table scan.
    # The manual column list avoids loading the full object, which would also
    # try to load related objects.
    cols = db.session.query(
        Relationship.source_type, Relationship.source_id,
        Relationship.destination_type, Relationship.destination_id)
    relationships = cols.filter(
        tuple_(Relationship.source_type, Relationship.source_id).in_(
            [(s.type, s.id) for s in stubs]
        )
    ).union_all(
        cols.filter(
            tuple_(Relationship.destination_type,
                   Relationship.destination_id).in_(
                       [(s.type, s.id) for s in stubs]))
    ).all()
    batch_requests = collections.defaultdict(set)
    for (src_type, src_id, dst_type, dst_id) in relationships:
      src = Stub(src_type, src_id)
      dst = Stub(dst_type, dst_id)
      # Only store a neighbor if we queried for it; this way we know we'll be
      # storing the complete neighborhood by the end of the loop.
      batch_requests[src_type].add(src_id)
      batch_requests[dst_type].add(dst_id)
      if src in stubs:
        self.cache[src].add(dst)
      if dst in stubs:
        self.cache[dst].add(src)

    for type_, ids in batch_requests.iteritems():
      model = getattr(models.all_models, type_)
      instances = model.query.filter(model.id.in_(ids))
      for instance in instances:
        self.instance_cache[Stub(type_, instance.id)] = instance
    return self.cache[obj]
Developer: zidarsk8, Project: ggrc-core, Lines: 43, Source: __init__.py

Example 6: pubroot

def pubroot(request, info, session):
    date = datetime.date.today()
    # If it's the early hours of the morning, it's more useful for us
    # to consider it still to be yesterday.
    if datetime.datetime.now().hour < 4:
        date = date - datetime.timedelta(1)
    thisweek_start = date - datetime.timedelta(date.weekday())
    thisweek_end = thisweek_start + datetime.timedelta(6)
    lastweek_start = thisweek_start - datetime.timedelta(7)
    lastweek_end = thisweek_end - datetime.timedelta(7)
    weekbefore_start = lastweek_start - datetime.timedelta(7)
    weekbefore_end = lastweek_end - datetime.timedelta(7)

    weeks = [
        ("Current week", thisweek_start, thisweek_end, business_totals(session, thisweek_start, thisweek_end)),
        ("Last week", lastweek_start, lastweek_end, business_totals(session, lastweek_start, lastweek_end)),
        (
            "The week before last",
            weekbefore_start,
            weekbefore_end,
            business_totals(session, weekbefore_start, weekbefore_end),
        ),
    ]

    currentsession = Session.current(session)
    barsummary = (
        session.query(StockLine)
        .filter(StockLine.location == "Bar")
        .order_by(StockLine.dept_id, StockLine.name)
        .options(joinedload_all("stockonsale.stocktype.unit"))
        .options(undefer_group("qtys"))
        .all()
    )
    stillage = (
        session.query(StockAnnotation)
        .join(StockItem)
        .outerjoin(StockLine)
        .filter(
            tuple_(StockAnnotation.text, StockAnnotation.time).in_(
                select(
                    [StockAnnotation.text, func.max(StockAnnotation.time)], StockAnnotation.atype == "location"
                ).group_by(StockAnnotation.text)
            )
        )
        .filter(StockItem.finished == None)
        .order_by(StockLine.name != null(), StockAnnotation.time)
        .options(joinedload_all("stockitem.stocktype.unit"))
        .options(joinedload_all("stockitem.stockline"))
        .options(undefer_group("qtys"))
        .all()
    )
    return (
        "index.html",
        {"currentsession": currentsession, "barsummary": barsummary, "stillage": stillage, "weeks": weeks},
    )
Developer: sde1000, Project: quicktill, Lines: 55, Source: views.py

Example 7: delete_records

def delete_records(snapshot_ids):
  """Delete all records for some snapshots.
  Args:
    snapshot_ids: An iterable with snapshot IDs whose full text records should
        be deleted.
  """
  to_delete = {("Snapshot", _id) for _id in snapshot_ids}
  db.session.query(Record).filter(
      tuple_(Record.type, Record.key).in_(to_delete)
  ).delete(synchronize_session=False)
  db.session.commit()
Developer: VinnieJohns, Project: ggrc-core, Lines: 11, Source: indexer.py

Example 8: get_records

def get_records(_audit, _snapshots):
  return db.session.query(Record).filter(
      tuple_(
          Record.type,
          Record.key,
          Record.property,
          Record.content
      ).in_(
          {("Snapshot", s.id, "parent", "Audit-{}".format(_audit.id))
           for s in _snapshots}
      ))
Developer: zidarsk8, Project: ggrc-core, Lines: 11, Source: test_indexing.py

Example 9: switch

  def switch(self, betID):
    # Return redirect('/bet/' + betID + '/')
    user = User()
    if user:
      RUser = user[0]
      c.user = user[1]
      RMatch = db.Session.query(db.Matches).filter(db.Matches.id == betID).first()
      if RMatch:
        RBet = db.Session.query(db.Bets).filter(and_(db.Bets.match == RMatch.id, db.Bets.user == RUser.id)).first()
        if RBet.team == RMatch.team1:
          RBetsTotal1 = db.Session.query(db.BetsTotal).filter(and_(db.BetsTotal.match == RMatch.id, db.BetsTotal.team == RMatch.team1)).first()
          RBetsTotal2 = db.Session.query(db.BetsTotal).filter(and_(db.BetsTotal.match == RMatch.id, db.BetsTotal.team == RMatch.team2)).first()
          RBet.team = RMatch.team2
        else:
          RBetsTotal1 = db.Session.query(db.BetsTotal).filter(and_(db.BetsTotal.match == RMatch.id, db.BetsTotal.team == RMatch.team2)).first()
          RBetsTotal2 = db.Session.query(db.BetsTotal).filter(and_(db.BetsTotal.match == RMatch.id, db.BetsTotal.team == RMatch.team1)).first()
          RBet.team = RMatch.team1

        RBetsTotal1.value -= RBet.value
        RBetsTotal2.value += RBet.value

        keys = []
        totalGroups1 = defaultdict(Counter)
        for group in RBetsTotal1.groups:
          totalGroups1[group[0]][group[1]] = group[2]
          keys.append((group[0], group[1]))

        totalGroups2 = defaultdict(Counter)
        for group in RBetsTotal2.groups:
          totalGroups2[group[0]][group[1]] = group[2]
          keys.append((group[0], group[1]))
        
        # Convert PostgreSQL's multidimensional array to dictionary
        usersGroups = defaultdict(Counter)
        for group in RBet.groups:
          totalGroups1[group[0]][group[1]] -= group[2]
          totalGroups2[group[0]][group[1]] += group[2]

        orderedGroups1 = []
        orderedGroups2 = []
        orderedItems = db.Session.query(db.Items).filter(tuple_(db.Items.defindex, db.Items.quality).in_(keys)).order_by(db.Items.type, db.Items.quality, db.Items.value.desc()).all()
        for orderedItem in orderedItems:
          defindex = orderedItem.defindex
          quality = orderedItem.quality
          if quality in totalGroups1[defindex]:
            orderedGroups1.append([defindex, quality, totalGroups1[defindex][quality]])
          if quality in totalGroups2[defindex]:
            orderedGroups2.append([defindex, quality, totalGroups2[defindex][quality]])
        RBetsTotal1.groups = orderedGroups1
        RBetsTotal2.groups = orderedGroups2

        db.Session.commit()
        return redirect('/bet/' + betID + '/')
    return redirect('/')
Developer: Ethanlord300, Project: Saloon.tf, Lines: 54, Source: bet.py

Example 10: get_records

def get_records(_audit, _snapshots):
  """Get Record objects related to provided audit and snapshots"""
  return db.session.query(Record).filter(
      tuple_(
          Record.type,
          Record.key,
          Record.property,
          Record.content
      ).in_(
          {("Snapshot", s.id, "parent", "Audit-{}".format(_audit.id))
           for s in _snapshots}
      ))
Developer: Smotko, Project: ggrc-core, Lines: 12, Source: test_indexing.py

Example 11: get_revisions

def get_revisions(pairs, revisions, filters=None):
  """Retrieve revision ids for pairs

  If a revisions dictionary is provided, it validates that the selected
  revision exists in the object's revision history.

  Args:
    pairs: set([(parent_1, child_1), (parent_2, child_2), ...])
    revisions: dict({(parent, child): revision_id, ...})
    filters: predicate
  """
  with benchmark("snapshotter.helpers.get_revisions"):
    revision_id_cache = dict()

    if pairs:
      with benchmark("get_revisions.create caches"):
        child_stubs = {pair.child for pair in pairs}

        with benchmark("get_revisions.create child -> parents cache"):
          parents_cache = collections.defaultdict(set)
          for parent, child in pairs:
            parents_cache[child].add(parent)

      with benchmark("get_revisions.retrieve revisions"):
        query = db.session.query(
            models.Revision.id,
            models.Revision.resource_type,
            models.Revision.resource_id).filter(
            tuple_(
                models.Revision.resource_type,
                models.Revision.resource_id).in_(child_stubs)
        ).order_by(models.Revision.id.desc())
        if filters:
          for _filter in filters:
            query = query.filter(_filter)

      with benchmark("get_revisions.create revision_id cache"):
        for revid, restype, resid in query:
          child = Stub(restype, resid)
          for parent in parents_cache[child]:
            key = Pair(parent, child)
            if key in revisions:
              if revid == revisions[key]:
                revision_id_cache[key] = revid
              else:
                logger.warning(
                    "Specified revision for object %s but couldn't find the"
                    "revision '%s' in object history", key, revisions[key])
            else:
              if key not in revision_id_cache:
                revision_id_cache[key] = revid
    return revision_id_cache
Developer: VinnieJohns, Project: ggrc-core, Lines: 52, Source: helpers.py

Example 12: query_tiles

def query_tiles(sess, layer, chunk, refresh_cutoff, refresh_cutoff_missing):
    """see existing_tiles; query the set of tiles 'chunk' to see which already exist.
    'cutoff's are timestamps instead of intervals now"""
    q = sess.query(mt.Tile).filter_by(layer=layer).filter(tuple_(mt.Tile.z, mt.Tile.x, mt.Tile.y).in_(list(chunk)))
    def cutoff_criteria():
        if refresh_cutoff is not None:
            yield and_(mt.Tile.uuid != null_digest(), mt.Tile.fetched_on > refresh_cutoff)
        if refresh_cutoff_missing is not None:
            yield and_(mt.Tile.uuid == null_digest(), mt.Tile.fetched_on > refresh_cutoff_missing)
    coc = list(cutoff_criteria())
    if coc:
        q = q.filter(or_(*coc))
    return set((layer, t.z, t.x, t.y) for t in q)
Developer: mrgriscom, Project: birdseye, Lines: 13, Source: mapdownload.py

Example 13: get_relationships

def get_relationships(relationships):
  """Retrieve relationships

  Args:
    relationships:
  """
  with benchmark("snapshotter.helpers.get_relationships"):
    if relationships:
      relationship_columns = db.session.query(
          models.Relationship.id,
          models.Relationship.modified_by_id,
          models.Relationship.created_at,
          models.Relationship.updated_at,
          models.Relationship.source_type,
          models.Relationship.source_id,
          models.Relationship.destination_type,
          models.Relationship.destination_id,
          models.Relationship.context_id,
      )

      return relationship_columns.filter(
          tuple_(
              models.Relationship.source_type,
              models.Relationship.source_id,
              models.Relationship.destination_type,
              models.Relationship.destination_id,
          ).in_(relationships)
      ).union(
          relationship_columns.filter(
              tuple_(
                  models.Relationship.destination_type,
                  models.Relationship.destination_id,
                  models.Relationship.source_type,
                  models.Relationship.source_id
              ).in_(relationships)
          )
      )
    else:
      return set()
Developer: VinnieJohns, Project: ggrc-core, Lines: 39, Source: helpers.py

Example 14: deactivate_old_records

def deactivate_old_records(dest_connector, dest_table, natural_keys, batch):
    '''Deactivate old records in the destination table that match records from
    the current batch on the natural key combination.

    :param dest_connector: destination connection
    :param dest_table: table to be migrated
    :param natural_keys: natural key combination for the dest_table
    :param batch: batch of records to be verified
    '''
    key_columns = [dest_table.columns[key] for key in natural_keys]
    key_values = [[row[key] for key in natural_keys] for row in batch]

    # Update prod rec_status to inactive for records whose natural keys match
    # the records in the current batch.
    update_query = dest_table.update(and_(dest_table.c.rec_status == 'C',
                                          tuple_(*key_columns).in_(key_values))).values(rec_status='I',
                                                                                        to_date=time.strftime("%Y%m%d"))
    dest_connector.execute(update_query)
Developer: SmarterApp, Project: RDW_DataWarehouse, Lines: 15, Source: migrate_by_row.py

Example 15: _prepare_cache

    def _prepare_cache(self, translatable):
        """
        Bulk load translations required to translate a translatable
        'structure'
        """
        translatables = self._collect_translatables(translatable)
        if not translatables:
            return {}

        pks = [(t.context, t.message_id) for t in translatables]
        pk_filter = tuple_(self.model.context, self.model.message_id).in_(pks)
        translations = self.session.query(self.model).filter(
            self.model.language == self.language).filter(pk_filter).values(
            self.model.context, self.model.message_id, self.model.value)
        cache = {(t[0], t[1]): t[2] for t in translations}
        return cache
Developer: pombredanne, Project: taal, Lines: 16, Source: __init__.py
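
A closing observation that is not part of the collected examples above: tuple_(...).in_(...) compiles to a multi-column (row-value) IN clause, which MySQL, PostgreSQL, and SQLite 3.15+ support but some backends do not. Below is a hedged sketch of an equivalent or_/and_ fallback, reusing the hypothetical Revision model and session from the sketch near the top of this page.

from sqlalchemy import and_, or_

pairs = [("Control", 1), ("Policy", 7)]  # hypothetical (resource_type, resource_id) pairs

# Equivalent to tuple_(Revision.resource_type, Revision.resource_id).in_(pairs),
# spelled as OR-of-ANDs for backends without row-value IN support.
fallback_filter = or_(*(
    and_(Revision.resource_type == r_type, Revision.resource_id == r_id)
    for r_type, r_id in pairs
))
rows = session.query(Revision).filter(fallback_filter).all()

As the comments in Examples 1 and 5 note, MySQL may answer broad OR conditions with a full-table scan, so the tuple_ IN form (or splitting the query with union_all, as those examples do) is usually preferable when the backend supports it.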


Note: The sqlalchemy.sql.expression.tuple_ function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult each project's License before distributing or using the code. Do not reproduce without permission.