

Python models.getSqlDatetime Function Code Examples

This article collects typical usage examples of the Python function uds.models.getSqlDatetime. If you are wondering how getSqlDatetime is used in practice, or are looking for concrete examples of calling it, the selected code snippets below may help.


A total of 15 getSqlDatetime code examples are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
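Before the individual examples, here is a minimal sketch (not part of the collected snippets) of the pattern that recurs throughout them: fetch the database server's current time with getSqlDatetime() and use it for time-window queries, so that every application server agrees on "now" regardless of its local clock. The State import path and the behavior of getSqlDatetime(True) returning a Unix timestamp are inferred from the examples below, not taken from official documentation.

# Minimal usage sketch. Assumptions: getSqlDatetime() with no argument returns a
# datetime based on the DB server's clock, and Scheduler/State are importable as
# shown in the openuds snippets below; treat these as illustrative, not documented API.
from datetime import timedelta

from uds.models import Scheduler, getSqlDatetime
from uds.core.util.State import State  # import path assumed from the openuds sources


def stale_running_tasks(max_age_minutes=15):
    """Return Scheduler rows stuck in RUNNING longer than max_age_minutes,
    measured against the database server's time rather than this host's clock."""
    since = getSqlDatetime() - timedelta(minutes=max_age_minutes)
    return Scheduler.objects.filter(state=State.RUNNING, last_execution__lt=since)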

Example 1: executeOneJob

    def executeOneJob(self):
        '''
        Looks for the best waiting job and executes it
        '''
        jobInstance = None
        try:
            now = getSqlDatetime()  # Datetimes are based on database server times
            fltr = Q(state=State.FOR_EXECUTE) & (Q(last_execution__gt=now) | Q(next_execution__lt=now))
            with transaction.atomic():
                # If next execution is before now or last execution is in the future (clock changed on this server, we take that task as executable)
                # These params are all set inside fltr (look at __init__)
                job = dbScheduler.objects.select_for_update().filter(fltr).order_by('next_execution')[0]  # @UndefinedVariable
                job.state = State.RUNNING
                job.owner_server = self._hostname
                job.last_execution = now
                job.save()

            jobInstance = job.getInstance()

            if jobInstance is None:
                logger.error('Job instance can\'t be resolved for {0}, removing it'.format(job))
                job.delete()
                return
            logger.debug('Executing job:>{0}<'.format(job.name))
            JobThread(jobInstance, job).start()  # Fire and forget: start the job in its own thread
        except IndexError:
            # Do nothing, there is no jobs for execution
            return
        except DatabaseError:
            # This will happen whenever a connection error or a deadlock error happens
            # This in fact means that we have to retry the operation, and the retry will happen on the main loop
            # Look at this http://dev.mysql.com/doc/refman/5.0/en/innodb-deadlocks.html
            # I have got some deadlock errors, but looking at that URL, I found that it is not so abnormal
            # logger.debug('Deadlock, no problem at all :-) (sounds hards, but really, no problem, will retry later :-) )')
            raise DatabaseError('Database access problems. Retrying connection')
Author: AlexeyBychkov, Project: openuds, Lines: 35, Source: Scheduler.py

Example 2: run

 def run(self):
     logger.debug('Publishing')
     servicePoolPub = None
     try:
         now = getSqlDatetime()
         with transaction.atomic():
             servicePoolPub = DeployedServicePublication.objects.select_for_update().get(pk=self._publishId)
             if servicePoolPub.state != State.LAUNCHING:  # If not in LAUNCHING state (it may have been canceled by the user) just return
                 return
             servicePoolPub.state = State.PREPARING
             servicePoolPub.save()
         pi = servicePoolPub.getInstance()
         state = pi.publish()
         deployedService = servicePoolPub.deployed_service
         deployedService.current_pub_revision += 1
         deployedService.storeValue('toBeReplacedIn', pickle.dumps(now + datetime.timedelta(hours=GlobalConfig.SESSION_EXPIRE_TIME.getInt(True))))
         deployedService.save()
         PublicationFinishChecker.checkAndUpdateState(servicePoolPub, pi, state)
     except DeployedServicePublication.DoesNotExist:  # Deployed service publication has been removed from database, this is ok, just ignore it
         pass
     except Exception:
         logger.exception("Exception launching publication")
         try:
             servicePoolPub.state = State.ERROR
             servicePoolPub.save()
         except Exception:
             logger.error('Error saving ERROR state for pool {}'.format(servicePoolPub))
Author: dkmstr, Project: openuds, Lines: 27, Source: PublicationManager.py

Example 3: _save

    def _save(self, name, content):
        name = self.get_valid_name(name)
        try:
            f = self._dbFileForReadWrite(name)
        except DBFile.DoesNotExist:
            now = getSqlDatetime()
            f = DBFile.objects.create(owner=self.owner, name=name, created=now, modified=now)

        f.data = content.read()
        f.modified = getSqlDatetime()
        f.save()

        # Store on cache also
        self._storeInCache(f)

        return name
Author: glyptodon, Project: openuds, Lines: 16, Source: FileStorage.py

Example 4: __log

    def __log(self, owner_type, owner_id, level, message, source, avoidDuplicates):
        """
        Logs a message associated to owner
        """
        from uds.models import getSqlDatetime
        from uds.models import Log

        # Ensure message fits on space
        message = message[:255]

        qs = Log.objects.filter(owner_id=owner_id, owner_type=owner_type)
        # First, ensure we do not have more than requested logs, and we can put one more log item
        if qs.count() >= GlobalConfig.MAX_LOGS_PER_ELEMENT.getInt():
            for i in qs.order_by('-created',)[GlobalConfig.MAX_LOGS_PER_ELEMENT.getInt() - 1:]:
                i.delete()

        if avoidDuplicates is True:
            try:
                lg = Log.objects.filter(owner_id=owner_id, owner_type=owner_type, level=level, source=source).order_by('-created', '-id')[0]
                if lg.message == message:
                    # Do not log again, already logged
                    return
            except Exception:  # Log entry does not exist
                pass

        # now, we add new log
        try:
            Log.objects.create(owner_type=owner_type, owner_id=owner_id, created=getSqlDatetime(), source=source, level=level, data=message)
        except:
            # Some objects will not get logged, such as System administrator objects
            pass
Author: dkmstr, Project: openuds, Lines: 31, Source: LogManager.py

Example 5: publish

    def publish(self, servicePool, changeLog=None):  # pylint: disable=no-self-use
        '''
        Initiates the publication of a service pool, or raises an exception if this cannot be done
        :param servicePool: Service pool object (db object)
        '''
        if servicePool.publications.filter(state__in=State.PUBLISH_STATES).count() > 0:
            raise PublishException(_('Already publishing. Wait for previous publication to finish and try again'))

        if servicePool.isInMaintenance():
            raise PublishException(_('Service is in maintenance mode and new publications are not allowed'))

        try:
            now = getSqlDatetime()
            dsp = None
            dsp = servicePool.publications.create(state=State.LAUNCHING, state_date=now, publish_date=now, revision=servicePool.current_pub_revision)
            if changeLog:
                servicePool.changelog.create(revision=servicePool.current_pub_revision, log=changeLog, stamp=now)
            DelayedTaskRunner.runner().insert(PublicationLauncher(dsp), 4, PUBTAG + str(dsp.id))
        except Exception as e:
            logger.debug('Caught exception at publish: {0}'.format(e))
            if dsp is not None:
                try:
                    dsp.delete()
                except Exception:
                    logger.info('Could not delete {}'.format(dsp))
            raise PublishException(str(e))
Author: AlexeyBychkov, Project: openuds, Lines: 26, Source: PublicationManager.py

Example 6: executeOneDelayedTask

    def executeOneDelayedTask(self):
        now = getSqlDatetime()
        filt = Q(execution_time__lt=now) | Q(insert_date__gt=now + timedelta(seconds=30))
        # If next execution is before now or last execution is in the future (clock changed on this server, we take that task as executable)
        try:
            with transaction.atomic():  # Encloses
                # Throws exception if no delayed task is available
                task = dbDelayedTask.objects.select_for_update().filter(filt).order_by('execution_time')[0]  # @UndefinedVariable
                if task.insert_date > now + timedelta(seconds=30):
                    logger.warning('Executed {} due to insert_date being in the future!'.format(task.type))
                taskInstanceDump = encoders.decode(task.instance, 'base64')
                task.delete()
            taskInstance = loads(taskInstanceDump)
        except IndexError:
            return  # No problem, there is no waiting delayed task
        except Exception:
            # The transaction has been rolled back by the "with atomic" block, so just return here
            # Note that if taskInstance can't be loaded, this task will not be retried
            logger.exception('Executing one task')
            return

        if taskInstance is not None:
            logger.debug('Executing delayedTask:>{0}<'.format(task))
            taskInstance.env = Environment.getEnvForType(taskInstance.__class__)
            DelayedTaskThread(taskInstance).start()
Author: dkmstr, Project: openuds, Lines: 25, Source: DelayedTaskRunner.py

Example 7: addCounter

    def addCounter(self, owner_type, owner_id, counterType, counterValue, stamp=None):
        '''
        Adds a new counter stats to database.

        Args:

            owner_type: type of owner (integer, from internal tables)
            owner_id:  id of the owner
            counterType: The type of counter that will receive the value (look at uds.core.util.stats.counters module)
            counterValue: Counter to store. Right now, this must be an integer value (-2G ~ 2G)
            stamp: if not None, this will be used as the date for the counter; otherwise the current date/time will be used
                   (this has a granularity of seconds)

        Returns:

            Nothing
        '''
        if stamp is None:
            stamp = getSqlDatetime()

        # To Unix epoch
        stamp = int(time.mktime(stamp.timetuple()))  # pylint: disable=maybe-no-member

        try:
            StatsCounters.objects.create(owner_type=owner_type, owner_id=owner_id, counter_type=counterType, value=counterValue, stamp=stamp)
            return True
        except Exception:
            logger.error('Exception handling counter stats saving (maybe database is full?)')
        return False
Author: aiminickwong, Project: openuds, Lines: 29, Source: StatsManager.py

Example 8: run

 def run(self):
     """
     Look for "hung" scheduler tasks and reset them
     """
     since = getSqlDatetime() - timedelta(minutes=15)
     with transaction.atomic():
         Scheduler.objects.select_for_update().filter(last_execution__lt=since, state=State.RUNNING).update(owner_server='', state=State.FOR_EXECUTE)
Author: dkmstr, Project: openuds, Lines: 7, Source: SchedulerHouseKeeping.py

Example 9: getServicesPoolsCounters

def getServicesPoolsCounters(servicePool, counter_type):
    # pylint: disable=no-value-for-parameter
    try:
        cacheKey = (servicePool and servicePool.id or 'all') + str(counter_type) + str(POINTS) + str(SINCE)
        to = getSqlDatetime()
        since = to - timedelta(days=SINCE)
        val = cache.get(cacheKey)
        if val is None:
            if servicePool is None:
                us = DeployedService()
                complete = True  # Get all deployed services stats
            else:
                us = servicePool
                complete = False
            val = []
            for x in counters.getCounters(us, counter_type, since=since, to=to, limit=POINTS, use_max=USE_MAX, all=complete):
                val.append({'stamp': x[0], 'value': int(x[1])})
            if len(val) > 2:
                cache.put(cacheKey, pickle.dumps(val).encode('zip'), 600)
            else:
                val = [{'stamp': since, 'value': 0}, {'stamp': to, 'value': 0}]
        else:
            val = pickle.loads(val.decode('zip'))

        return val
    except:
        logger.exception('exception')
        raise ResponseError('can\'t create stats for objects!!!')
Author: AlexeyBychkov, Project: openuds, Lines: 28, Source: system.py

Example 10: get

 def get(self, rangeStart=0, rangeEnd=MAX_SEQ):
     '''
     Tries to generate a new unique id in the range provided. This unique id
     is global to the "unique ids" database
     '''
     # First look for a name in the range defined
     stamp = getSqlDatetime(True)
     # logger.debug(UniqueId)
     try:
         UniqueId.objects.lock()  # @UndefinedVariable
         flt = self.__filter(rangeStart, rangeEnd)
         try:
             item = flt.filter(assigned=False).order_by('seq')[0]
             UniqueId.objects.filter(id=item.id).update(owner=self._owner, assigned=True, stamp=stamp)  # @UndefinedVariable
             seq = item.seq
         except Exception:  # No free element found
             try:
                 last = flt.filter(assigned=True)[0]  # DB Returns correct order so the 0 item is the last
                 seq = last.seq + 1
             except Exception:  # If there is no assigned at database
                 seq = rangeStart
             # logger.debug('Found seq {0}'.format(seq))
             if seq > rangeEnd:
                 return -1  # No ids free in range
             UniqueId.objects.create(owner=self._owner, basename=self._baseName, seq=seq, assigned=True, stamp=stamp)  # @UndefinedVariable
         logger.debug('Seq: {}'.format(seq))
         return seq
     except Exception:
         logger.exception('Generating unique id sequence')
         return None
     finally:
         UniqueId.objects.unlock()  # @UndefinedVariable
Author: joaoguariglia, Project: openuds, Lines: 32, Source: UniqueIDGenerator.py

Example 11: releaseOlderThan

 def releaseOlderThan(self, stamp=None):
     stamp = getSqlDatetime(True) if stamp is None else stamp
     try:
         UniqueId.objects.lock()  # @UndefinedVariable
         UniqueId.objects.filter(owner=self._owner, stamp__lt=stamp).update(assigned=False, owner='', stamp=stamp)  # @UndefinedVariable
         self.__purge()
     finally:
         UniqueId.objects.unlock()  # @UndefinedVariable
Author: joaoguariglia, Project: openuds, Lines: 8, Source: UniqueIDGenerator.py

Example 12: __doCleanup

    def __doCleanup(self, model):
        minTime = time.mktime((getSqlDatetime() - datetime.timedelta(days=GlobalConfig.STATS_DURATION.getInt())).timetuple())

        # Newer Django versions (at least 1.7) do this deletion properly (executing a DELETE FROM ... WHERE ...)
        model.objects.filter(stamp__lt=minTime).delete()

        # Optimize mysql tables after deletions
        optimizeTable(model._meta.db_table)
Author: aiminickwong, Project: openuds, Lines: 8, Source: StatsManager.py

Example 13: releaseOwnShedules

 def releaseOwnShedules():
     """
     Releases all scheduled tasks being executed by this server
     """
     logger.debug('Releasing all owned scheduled tasks')
     with transaction.atomic():
         dbScheduler.objects.select_for_update().filter(owner_server=platform.node()).update(owner_server='')  # @UndefinedVariable
         dbScheduler.objects.select_for_update().filter(last_execution__lt=getSqlDatetime() - timedelta(minutes=15), state=State.RUNNING).update(owner_server='', state=State.FOR_EXECUTE)  # @UndefinedVariable
         dbScheduler.objects.select_for_update().filter(owner_server='').update(state=State.FOR_EXECUTE)  # @UndefinedVariable
Author: glyptodon, Project: openuds, Lines: 9, Source: Scheduler.py

Example 14: run

 def run(self):
     removeFrom = getSqlDatetime() - timedelta(seconds=10)  # We keep the machine for at least 10 seconds before removing it, to avoid connection errors
     removables = UserService.objects.filter(state=State.REMOVABLE, state_date__lt=removeFrom,
                                             deployed_service__service__provider__maintenance_mode=False)[0:UserServiceRemover.removeAtOnce]
     for us in removables:
         try:
             UserServiceManager.manager().remove(us)
         except Exception:
             logger.exception('Exception invoking remove user service {}'.format(us))
Author: AlexeyBychkov, Project: openuds, Lines: 9, Source: UserServiceCleaner.py

Example 15: release

 def release(self):
     try:
         UniqueId.objects.lock()  # @UndefinedVariable
         UniqueId.objects.filter(owner=self._owner).update(
             assigned=False, owner="", stamp=getSqlDatetime(True)
         )  # @UndefinedVariable
         self.__purge()
     finally:
         UniqueId.objects.unlock()  # @UndefinedVariable
Author: AlexeyBychkov, Project: openuds, Lines: 9, Source: UniqueIDGenerator.py


Note: The uds.models.getSqlDatetime examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.