

Python ThreadedConnectionPool.closeall Method Code Examples

This article collects typical usage examples of the Python method psycopg2.pool.ThreadedConnectionPool.closeall, drawn from open-source projects. If you have been wondering what ThreadedConnectionPool.closeall does, how to call it, or what idiomatic usage looks like, the curated examples below should help; they also illustrate the broader usage of psycopg2.pool.ThreadedConnectionPool.


The following thirteen code examples demonstrate ThreadedConnectionPool.closeall, ordered by popularity.
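
Before the examples, here is a minimal sketch of the lifecycle they all share: create a pool, borrow and return connections, then call closeall() once at shutdown. The connection parameters below are placeholders, not taken from any project cited here.

from psycopg2.pool import ThreadedConnectionPool

# Hypothetical credentials, for illustration only.
pool = ThreadedConnectionPool(1, 5, dbname="example", user="example",
                              password="secret", host="localhost")
conn = pool.getconn()              # borrow a connection from the pool
try:
    cur = conn.cursor()
    cur.execute("SELECT 1")
    print(cur.fetchone())
    cur.close()
finally:
    pool.putconn(conn)             # return the connection for reuse
pool.closeall()                    # close every pooled connection at shutdown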

Example 1: test_context_manager

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
    def test_context_manager(self):
        """
        test using the context manager to access the pool 
        """
        min_connections = 1
        max_connections = 5
        test_number = 42

        connection_pool = ThreadedConnectionPool(min_connections,
                                                 max_connections,
                                                 **_database_credentials)

        test_greenlet = ContextWriteGreenlet(connection_pool, test_number, 3.0)
        rollback_greenlet = ContextRollbackGreenlet(connection_pool, 3.0)

        test_greenlet.start()
        rollback_greenlet.start()

        test_greenlet.join()
        self.assertTrue(test_greenlet.successful())

        rollback_greenlet.join()
        self.assertTrue(rollback_greenlet.successful())

        result = test_greenlet.value
        self.assertEqual(result, [(test_number, )])

        connection_pool.closeall()
Author: HackLinux, Project: nimbus.io, Lines of code: 30, Source file: test_greenlet_connection_pool.py

Example 2: test_decorator

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
    def test_decorator(self):
        """
        test using the decorator to access the pool 
        """
        global _connection_pool
        min_connections = 1
        max_connections = 5
        test_number = 42

        _connection_pool = ThreadedConnectionPool(min_connections,
                                                  max_connections,
                                                  **_database_credentials)

        test_greenlet = DecoratorWriteGreenlet(test_number, 3.0)
        rollback_greenlet = DecoratorRollbackGreenlet(3.0)

        test_greenlet.start()
        rollback_greenlet.start()

        test_greenlet.join()
        self.assertTrue(test_greenlet.successful())

        rollback_greenlet.join()
        self.assertTrue(rollback_greenlet.successful())

        result = test_greenlet.value
        self.assertEqual(result, [(test_number, )])

        _connection_pool.closeall()
Author: HackLinux, Project: nimbus.io, Lines of code: 31, Source file: test_greenlet_connection_pool.py

Example 3: __init__

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
class PgConnectionPool:

    def __init__(self, *args, min_conns=1, keep_conns=10, max_conns=10,
                 **kwargs):
        self._pool = ThreadedConnectionPool(
            min_conns, max_conns, *args, **kwargs)
        self._keep_conns = keep_conns

    def acquire(self):
        pool = self._pool
        conn = pool.getconn()
        # Shrink the pool's minimum so that at most keep_conns connections
        # are retained on return; note this reads the pool's private _used map.
        pool.minconn = min(self._keep_conns, len(pool._used))
        return conn

    def release(self, conn):
        self._pool.putconn(conn)

    def close(self):
        if hasattr(self, '_pool'):
            self._pool.closeall()

    __del__ = close
Author: jampp, Project: macross-gcd, Lines of code: 24, Source file: store.py
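
A plausible way to drive this wrapper, assuming a reachable PostgreSQL instance (the DSN below is a placeholder, not taken from the project):

# Hypothetical DSN; any psycopg2 connection arguments would work.
pool = PgConnectionPool("dbname=example user=example password=secret")
conn = pool.acquire()
try:
    cur = conn.cursor()
    cur.execute("SELECT now()")
    print(cur.fetchone())
    conn.commit()
finally:
    pool.release(conn)
pool.close()  # optional here, since __del__ is aliased to close()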

Example 4: Database

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
class Database(DatabaseInterface):

    _databases = {}
    _connpool = None
    _list_cache = None
    _list_cache_timestamp = None
    _version_cache = {}

    def __new__(cls, database_name="template1"):
        if database_name in cls._databases:
            return cls._databases[database_name]
        return DatabaseInterface.__new__(cls, database_name=database_name)

    def __init__(self, database_name="template1"):
        super(Database, self).__init__(database_name=database_name)
        self._databases.setdefault(database_name, self)

    def connect(self):
        if self._connpool is not None:
            return self
        logger = logging.getLogger("database")
        logger.info('connect to "%s"' % self.database_name)
        host = CONFIG["db_host"] and "host=%s" % CONFIG["db_host"] or ""
        port = CONFIG["db_port"] and "port=%s" % CONFIG["db_port"] or ""
        name = "dbname=%s" % self.database_name
        user = CONFIG["db_user"] and "user=%s" % CONFIG["db_user"] or ""
        password = CONFIG["db_password"] and "password=%s" % CONFIG["db_password"] or ""
        minconn = int(CONFIG["db_minconn"]) or 1
        maxconn = int(CONFIG["db_maxconn"]) or 64
        dsn = "%s %s %s %s %s" % (host, port, name, user, password)
        self._connpool = ThreadedConnectionPool(minconn, maxconn, dsn)
        return self

    def cursor(self, autocommit=False, readonly=False):
        if self._connpool is None:
            self.connect()
        conn = self._connpool.getconn()
        if autocommit:
            conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        else:
            conn.set_isolation_level(ISOLATION_LEVEL_REPEATABLE_READ)
        cursor = Cursor(self._connpool, conn, self)
        if readonly:
            cursor.execute("SET TRANSACTION READ ONLY")
        return cursor

    def close(self):
        if self._connpool is None:
            return
        self._connpool.closeall()
        self._connpool = None

    def create(self, cursor, database_name):
        cursor.execute('CREATE DATABASE "' + database_name + '" ' "TEMPLATE template0 ENCODING 'unicode'")
        Database._list_cache = None

    def drop(self, cursor, database_name):
        cursor.execute('DROP DATABASE "' + database_name + '"')
        Database._list_cache = None

    def get_version(self, cursor):
        if self.database_name not in self._version_cache:
            cursor.execute("SELECT version()")
            version, = cursor.fetchone()
            self._version_cache[self.database_name] = tuple(map(int, RE_VERSION.search(version).groups()))
        return self._version_cache[self.database_name]

    @staticmethod
    def dump(database_name):
        from trytond.tools import exec_pg_command_pipe

        cmd = ["pg_dump", "--format=c", "--no-owner"]
        if CONFIG["db_user"]:
            cmd.append("--username=" + CONFIG["db_user"])
        if CONFIG["db_host"]:
            cmd.append("--host=" + CONFIG["db_host"])
        if CONFIG["db_port"]:
            cmd.append("--port=" + CONFIG["db_port"])
        cmd.append(database_name)

        pipe = exec_pg_command_pipe(*tuple(cmd))
        pipe.stdin.close()
        data = pipe.stdout.read()
        res = pipe.wait()
        if res:
            raise Exception("Couldn't dump database!")
        return data

    @staticmethod
    def restore(database_name, data):
        from trytond.tools import exec_pg_command_pipe

        database = Database().connect()
        cursor = database.cursor(autocommit=True)
        database.create(cursor, database_name)
        cursor.commit()
        cursor.close()

        cmd = ["pg_restore", "--no-owner"]
        if CONFIG["db_user"]:
#......... rest of the code omitted .........
Author: openlabs, Project: trytond, Lines of code: 103, Source file: database.py

Example 5: handle

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
    def handle(self, *args, **options):

        self.mturk_email = getattr(settings, 'MTURK_AUTH_EMAIL', None)
        self.mturk_password = getattr(settings, 'MTURK_AUTH_PASSWORD', None)

        _start_time = time.time()
        pid = Pid('mturk_crawler', True)
        log.info('crawler started: %s;;%s', args, options)

        if options.get('mturk_email'):
            self.mturk_email = options['mturk_email']
        if options.get('mturk_password'):
            self.mturk_password = options['mturk_password']

        if options.get('logconf', None):
            self.setup_logging(options['logconf'])

        if options.get('debug', False):
            self.setup_debug()
            print 'Current process pid: %s' % pid.actual_pid
            print ('To debug, type: python -c "import os,signal; '
                'os.kill(%s, signal.SIGUSR1)"\n') % pid.actual_pid

        self.maxworkers = options['workers']
        if self.maxworkers > 9:
            # If you want to remove this limit, don't forget to change the
            # dbpool object's maximum number of connections. Each worker
            # fetches 10 hitgroups and spawns a single task for each of them,
            # and every task gets a private connection instance, so 9 workers
            # already require 9 x 10 = 90 connections.
            #
            # Also, with too many workers Amazon stops returning valid data,
            # and retrying takes much longer than simply using fewer workers.
            sys.exit('Too many workers (more than 9). Quit.')
        start_time = datetime.datetime.now()

        hits_available = tasks.hits_mainpage_total()
        groups_available = tasks.hits_groups_total()

        # create crawl object that will be filled with data later
        crawl = Crawl.objects.create(
                start_time=start_time,
                end_time=start_time,
                success=True,
                hits_available=hits_available,
                hits_downloaded=0,
                groups_available=groups_available,
                groups_downloaded=groups_available)
        log.debug('fresh crawl object created: %s', crawl.id)

        # fetch those requester profiles so we could decide if their hitgroups
        # are public or not
        reqesters = RequesterProfile.objects.all_as_dict()

        dbpool = ThreadedConnectionPool(10, 90,
            'dbname=%s user=%s password=%s' % (
                settings.DATABASES['default']['NAME'],
                settings.DATABASES['default']['USER'],
                settings.DATABASES['default']['PASSWORD']))
        # collection of group_ids that were already processed - this should
        # protect us from duplicating data
        processed_groups = set()
        total_reward = 0
        hitgroups_iter = self.hits_iter()

        for hg_pack in hitgroups_iter:
            jobs = []
            for hg in hg_pack:
                if hg['group_id'] in processed_groups:
                    log.debug('Group already in processed_groups, skipping.')
                    continue
                processed_groups.add(hg['group_id'])

                j = gevent.spawn(tasks.process_group,
                        hg, crawl.id, reqesters, processed_groups, dbpool)
                jobs.append(j)
                total_reward += hg['reward'] * hg['hits_available']
            log.debug('processing pack of hitgroups objects')
            gevent.joinall(
                jobs, timeout=settings.CRAWLER_GROUP_PROCESSING_TIMEOUT)
            # check if all jobs ended successfully
            for job in jobs:
                if not job.ready():
                    log.error('Killing job: %s', job)
                    job.kill()

            if len(processed_groups) >= groups_available:
                log.info('Skipping empty groups.')
                # there's no need to iterate over empty groups.. break
                break

            # amazon does not like too many requests at once, so give them a
            # quick rest...
            gevent.sleep(1)

        dbpool.closeall()

        # update crawler object
        crawl.groups_downloaded = len(processed_groups)
#......... rest of the code omitted .........
Author: devinshields, Project: Mturk-Tracker, Lines of code: 103, Source file: crawl.py

Example 6: connection

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
class connection(object):

    def __init__(self,url=None,hstore=False,log=None,logf=None,min=1,max=5,
                               default_cursor=DictCursor):
        params = urlparse.urlparse(url or 
                                   os.environ.get('DATABASE_URL') or 
                                   'postgres://localhost/')
        self.pool = ThreadedConnectionPool(min,max,
                                           database=params.path[1:],
                                           user=params.username,
                                           password=params.password,
                                           host=params.hostname,
                                           port=params.port,
                    )
        self.hstore = hstore
        self.log = log
        self.logf = logf or (lambda cursor : cursor.query)
        self.default_cursor = default_cursor
        self.prepared_statement_id = 0

    def prepare(self,statement,params=None,name=None,call_type=None):
        """
            >>> db = connection()
            >>> p1 = db.prepare('SELECT name FROM doctest_t1 WHERE id = $1')
            >>> p2 = db.prepare('UPDATE doctest_t1 set name = $2 WHERE id = $1',('int','text'))
            >>> db.execute(p2,(1,'xxxxx'))
            1
            >>> db.query_one(p1,(1,))
            ['xxxxx']
            >>> db.execute(p2,(1,'aaaaa'))
            1
            >>> db.query_one(p1,(1,))
            ['aaaaa']
        """
        if not name:
            self.prepared_statement_id += 1
            name = '_pstmt_%03.3d' % self.prepared_statement_id
        if params:
            params = '(' + ','.join(params) + ')'
        else:
            params = ''
        with self.cursor() as c:
            c.execute('PREPARE %s %s AS %s' % (name,params,statement))
        if call_type is None:
            if statement.lower().startswith('select'):
                call_type = 'query'
            else:
                call_type = 'execute'
        return PreparedStatement(self,name,call_type)

    def shutdown(self):
        if self.pool:
            self.pool.closeall()
            self.pool = None

    def cursor(self,cursor_factory=None):
        return cursor(self.pool,
                      cursor_factory or self.default_cursor,
                      self.hstore,
                      self.log,
                      self.logf)

    def __del__(self):
        self.shutdown()

    def __getattr__(self,name):
        def _wrapper(*args,**kwargs):
            with self.cursor() as c:
                return getattr(c,name)(*args,**kwargs)
        return _wrapper
Author: ephillipe, Project: pgwrap, Lines of code: 72, Source file: db.py
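
Note the __getattr__ hook: any unknown attribute is forwarded to a fresh pooled cursor, so cursor methods can be called directly on the connection object. A hedged sketch of that pattern, reusing the names from the doctest above (the URL is a placeholder):

db = connection('postgres://user:secret@localhost/example')  # hypothetical URL
# __getattr__ forwards this call to a cursor drawn from the pool:
row = db.query_one('SELECT name FROM doctest_t1 WHERE id = %s', (1,))
db.shutdown()  # closeall() on the pool; also triggered by __del__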

Example 7: Database

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
class Database(rigor.database.Database):
	""" Container for a database connection pool """

	def __init__(self, database):
		super(Database, self).__init__(database)
		register_type(psycopg2.extensions.UNICODE)
		register_uuid()
		dsn = Database.build_dsn(database)
		self._pool = ThreadedConnectionPool(config.get('database', 'min_database_connections'), config.get('database', 'max_database_connections'), dsn)

	@staticmethod
	def build_dsn(database):
		""" Builds the database connection string from config values """
		dsn = "dbname='{0}' host='{1}'".format(database, config.get('database', 'host'))
		try:
			ssl = config.getboolean('database', 'ssl')
			if ssl:
				dsn += " sslmode='require'"
		except ConfigParser.Error:
			pass
		try:
			username = config.get('database', 'username')
			dsn += " user='{0}'".format(username)
		except ConfigParser.Error:
			pass
		try:
			password = config.get('database', 'password')
			dsn += " password='{0}'".format(password)
		except ConfigParser.Error:
			pass
		return dsn

	@staticmethod
	@template
	def create(name):
		""" Creates a new database with the given name """
		return "CREATE DATABASE {0};".format(name)

	@staticmethod
	@template
	def drop(name):
		""" Drops the database with the given name """
		return "DROP DATABASE {0};".format(name)

	@staticmethod
	@template
	def clone(source, destination):
		"""
		Copies the source database to a new destination database.  This may fail if
		the source database is in active use.
		"""
		return "CREATE DATABASE {0} WITH TEMPLATE {1};".format(destination, source)

	@contextmanager
	def get_cursor(self, commit=True):
		""" Gets a cursor from a connection in the pool """
		connection = self._pool.getconn()
		cursor = connection.cursor(cursor_factory=RigorCursor)
		try:
			yield cursor
		except psycopg2.IntegrityError as error:
			exc_info = sys.exc_info()
			self.rollback(cursor)
			raise rigor.database.IntegrityError, exc_info[1], exc_info[2]
		except psycopg2.DatabaseError as error:
			exc_info = sys.exc_info()
			self.rollback(cursor)
			raise rigor.database.DatabaseError, exc_info[1], exc_info[2]
		except:
			exc_info = sys.exc_info()
			self.rollback(cursor)
			raise exc_info[0], exc_info[1], exc_info[2]
		else:
			if commit:
				self.commit(cursor)
			else:
				self.rollback(cursor)

	def _close_cursor(self, cursor):
		""" Closes a cursor and releases the connection to the pool """
		cursor.close()
		self._pool.putconn(cursor.connection)

	def commit(self, cursor):
		""" Commits the transaction, then closes the cursor """
		cursor.connection.commit()
		self._close_cursor(cursor)

	def rollback(self, cursor):
		""" Rolls back the transaction, then closes the cursor """
		cursor.connection.rollback()
		self._close_cursor(cursor)

	def __del__(self):
		self._pool.closeall()
Author: mindis, Project: rigor, Lines of code: 97, Source file: psycopg2_adapter.py
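
A sketch of how this adapter might be driven; get_cursor() commits on clean exit and rolls back on error, per the code above. The database and table names here are assumptions:

db = Database('example_db')                       # hypothetical database name
with db.get_cursor() as cursor:
    cursor.execute('SELECT COUNT(*) FROM image')  # hypothetical table
    print(cursor.fetchone())
# leaving the block commits and returns the connection to the pool;
# the pool itself is closed by __del__ via closeall()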

Example 8: exit

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
        dbname = cp.get('database','dbname')
        dbuser = cp.get('database','user')
        dbhost = cp.get('database','host')
        dbpass = cp.get('database','dbpass')
        dbpoolSize = cp.get('database','dbpoolSize')
    except ConfigParser.NoOptionError, e:
        print "TBDB.cfg: missing parameter"
        exit(1)

    # Create DB connection pool
    dbpool = ThreadedConnectionPool(2, int(dbpoolSize), "dbname='%s' user='%s' host='%s' password='%s'"%(dbname,dbuser,dbhost,dbpass))
    # Start each channel/thread
    for line in file(nodesCfgFile):
        print line
        if line[:1] != '#' and len(line.strip()) > 0:
            moteId,local_port,dev_addr,dev_port = line.split()
            #settings.append((int(moteId),int(local_port),dev_addr,int(dev_port)))
            forwarder('',int(local_port),dev_addr,int(dev_port), int(moteId))
    try:
        asyncore.loop()
    except KeyboardInterrupt, e:
        print e
    except asyncore.ExitNow, e:
        print e
    # close all DB pool connections
    print "Closing DB connection pool"
    dbpool.closeall()



Author: akappaun, Project: Testbed-1, Lines of code: 29, Source file: TBrelay.py

Example 9: Database

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
class Database(DatabaseInterface):

    _databases = {}
    _connpool = None
    _list_cache = None
    _list_cache_timestamp = None
    _version_cache = {}

    def __new__(cls, database_name='template1'):
        if database_name in cls._databases:
            return cls._databases[database_name]
        return DatabaseInterface.__new__(cls, database_name=database_name)

    def __init__(self, database_name='template1'):
        super(Database, self).__init__(database_name=database_name)
        self._databases.setdefault(database_name, self)

    def connect(self):
        if self._connpool is not None:
            return self
        logger = logging.getLogger('database')
        logger.info('connect to "%s"' % self.database_name)
        host = CONFIG['db_host'] and "host=%s" % CONFIG['db_host'] or ''
        port = CONFIG['db_port'] and "port=%s" % CONFIG['db_port'] or ''
        name = "dbname=%s" % self.database_name
        user = CONFIG['db_user'] and "user=%s" % CONFIG['db_user'] or ''
        password = CONFIG['db_password'] \
                and "password=%s" % CONFIG['db_password'] or ''
        minconn = int(CONFIG['db_minconn']) or 1
        maxconn = int(CONFIG['db_maxconn']) or 64
        dsn = '%s %s %s %s %s' % (host, port, name, user, password)
        self._connpool = ThreadedConnectionPool(minconn, maxconn, dsn)
        return self

    def cursor(self, autocommit=False, readonly=False):
        if self._connpool is None:
            self.connect()
        conn = self._connpool.getconn()
        if autocommit:
            conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        else:
            conn.set_isolation_level(ISOLATION_LEVEL_REPEATABLE_READ)
        cursor = Cursor(self._connpool, conn, self)
        # TODO change for set_session
        if readonly:
            cursor.execute('SET TRANSACTION READ ONLY')
        return cursor

    def close(self):
        if self._connpool is None:
            return
        self._connpool.closeall()
        self._connpool = None

    def create(self, cursor, database_name):
        cursor.execute('CREATE DATABASE "' + database_name + '" ' \
                'TEMPLATE template0 ENCODING \'unicode\'')
        Database._list_cache = None

    def drop(self, cursor, database_name):
        cursor.execute('DROP DATABASE "' + database_name + '"')
        Database._list_cache = None

    def get_version(self, cursor):
        if self.database_name not in self._version_cache:
            cursor.execute('SELECT version()')
            version, = cursor.fetchone()
            self._version_cache[self.database_name] = tuple(map(int,
                RE_VERSION.search(version).groups()))
        return self._version_cache[self.database_name]

    @staticmethod
    def dump(database_name):
        from trytond.tools import exec_pg_command_pipe

        cmd = ['pg_dump', '--format=c', '--no-owner']
        if CONFIG['db_user']:
            cmd.append('--username=' + CONFIG['db_user'])
        if CONFIG['db_host']:
            cmd.append('--host=' + CONFIG['db_host'])
        if CONFIG['db_port']:
            cmd.append('--port=' + CONFIG['db_port'])
        cmd.append(database_name)

        pipe = exec_pg_command_pipe(*tuple(cmd))
        pipe.stdin.close()
        data = pipe.stdout.read()
        res = pipe.wait()
        if res:
            raise Exception('Couldn\'t dump database!')
        return data

    @staticmethod
    def restore(database_name, data):
        from trytond.tools import exec_pg_command_pipe

        database = Database().connect()
        cursor = database.cursor(autocommit=True)
        database.create(cursor, database_name)
        cursor.commit()
#......... rest of the code omitted .........
Author: mediafactory, Project: tryton_core_daemon, Lines of code: 103, Source file: database.py

Example 10: handle

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
    def handle(self, *args, **options):

        self.mturk_email = getattr(settings, 'MTURK_AUTH_EMAIL', None)
        self.mturk_password = getattr(settings, 'MTURK_AUTH_PASSWORD', None)

        _start_time = time.time()
        pid = Pid('mturk_crawler', True)
        log.info('crawler started: %s;;%s', args, options)

        if options.get('mturk_email'):
            self.mturk_email = options['mturk_email']
        if options.get('mturk_password'):
            self.mturk_password = options['mturk_password']

        if options.get('logconf', None):
            self.setup_logging(options['logconf'])

        if options.get('debug', False):
            self.setup_debug()
            print 'Current process pid: %s' % pid.actual_pid
            print ('To debug, type: python -c "import os,signal; '
                'os.kill(%s, signal.SIGUSR1)"\n') % pid.actual_pid

        self.maxworkers = options['workers']
        if self.maxworkers > 9:
            # If you want to remove this limit, don't forget to change the
            # dbpool object's maximum number of connections. Each worker
            # fetches 10 hitgroups and spawns a single task for each of them,
            # and every task gets a private connection instance, so 9 workers
            # already require 9 x 10 = 90 connections.
            #
            # Also, with too many workers Amazon stops returning valid data,
            # and retrying takes much longer than simply using fewer workers.
            sys.exit('Too many workers (more than 9). Quit.')
        start_time = datetime.datetime.now()

        hits_available = tasks.hits_mainpage_total()
        groups_available = tasks.hits_groups_total()

        # create crawl object that will be filled with data later
        crawl = Crawl.objects.create(
                start_time=start_time,
                end_time=datetime.datetime.now(),
                success=True,
                hits_available=hits_available,
                hits_downloaded=0,
                groups_available=groups_available,
                groups_downloaded=groups_available)
        log.debug('fresh crawl object created: %s', crawl.id)

        # fetch those requester profiles so we could decide if their hitgroups
        # are public or not
        reqesters = RequesterProfile.objects.all_as_dict()

        dbpool = ThreadedConnectionPool(10, 90,
            'dbname=%s user=%s password=%s' % (settings.DATABASE_NAME,
                settings.DATABASE_USER, settings.DATABASE_PASSWORD))
        # collection of group_ids that were already processed - this should
        # protect us from duplicating data
        processed_groups = set()
        total_reward = 0
        hitgroups_iter = self.hits_iter()

        for hg_pack in hitgroups_iter:
            jobs = []
            for hg in hg_pack:
                j = gevent.spawn(tasks.process_group,
                        hg, crawl.id, reqesters, processed_groups, dbpool)
                jobs.append(j)
                total_reward += hg['reward'] * hg['hits_available']
            log.debug('processing pack of hitgroups objects')
            gevent.joinall(jobs, timeout=20)
            # check if all jobs ended successfully
            for job in jobs:
                if not job.ready():
                    log.error('Killing job: %s', job)
                    job.kill()

            if len(processed_groups) >= groups_available:
                log.info('Skipping empty groups.')
                # there's no need to iterate over empty groups.. break
                break

            # amazon does not like too many requests at once, so give them a
            # quick rest...
            gevent.sleep(1)

        dbpool.closeall()

        # update crawler object
        crawl.groups_downloaded = len(processed_groups)
        crawl.end_time = datetime.datetime.now()
        crawl.save()

        work_time = time.time() - _start_time
        log.info('created crawl id: %s', crawl.id)
        log.info('total reward value: %s', total_reward)
        log.info('processed hits groups downloaded: %s', len(processed_groups))
        log.info('processed hits groups available: %s', groups_available)
#......... rest of the code omitted .........
Author: knightelvis, Project: Mturk-Tracker, Lines of code: 103, Source file: crawl.py

Example 11: Database

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
class Database(DatabaseInterface):

    _databases = {}
    _connpool = None
    _list_cache = None
    _list_cache_timestamp = None
    _version_cache = {}
    flavor = Flavor(ilike=True)

    def __new__(cls, database_name='template1'):
        if database_name in cls._databases:
            return cls._databases[database_name]
        return DatabaseInterface.__new__(cls, database_name=database_name)

    def __init__(self, database_name='template1'):
        super(Database, self).__init__(database_name=database_name)
        self._databases.setdefault(database_name, self)

    def connect(self):
        if self._connpool is not None:
            return self
        logger.info('connect to "%s"', self.database_name)
        uri = parse_uri(config.get('database', 'uri'))
        assert uri.scheme == 'postgresql'
        host = uri.hostname and "host=%s" % uri.hostname or ''
        port = uri.port and "port=%s" % uri.port or ''
        name = "dbname=%s" % self.database_name
        user = uri.username and "user=%s" % uri.username or ''
        password = ("password=%s" % urllib.unquote_plus(uri.password)
            if uri.password else '')
        minconn = config.getint('database', 'minconn', default=1)
        maxconn = config.getint('database', 'maxconn', default=64)
        dsn = '%s %s %s %s %s' % (host, port, name, user, password)
        self._connpool = ThreadedConnectionPool(minconn, maxconn, dsn)
        return self

    def cursor(self, autocommit=False, readonly=False):
        if self._connpool is None:
            self.connect()
        conn = self._connpool.getconn()
        if autocommit:
            conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        else:
            conn.set_isolation_level(ISOLATION_LEVEL_REPEATABLE_READ)
        cursor = Cursor(self._connpool, conn, self)
        if readonly:
            cursor.execute('SET TRANSACTION READ ONLY')
        return cursor

    def close(self):
        if self._connpool is None:
            return
        self._connpool.closeall()
        self._connpool = None

    @classmethod
    def create(cls, cursor, database_name):
        cursor.execute('CREATE DATABASE "' + database_name + '" '
            'TEMPLATE template0 ENCODING \'unicode\'')
        cls._list_cache = None

    @classmethod
    def drop(cls, cursor, database_name):
        cursor.execute('DROP DATABASE "' + database_name + '"')
        cls._list_cache = None

    def get_version(self, cursor):
        if self.database_name not in self._version_cache:
            cursor.execute('SELECT version()')
            version, = cursor.fetchone()
            self._version_cache[self.database_name] = tuple(map(int,
                RE_VERSION.search(version).groups()))
        return self._version_cache[self.database_name]

    @staticmethod
    def dump(database_name):
        from trytond.tools import exec_command_pipe

        cmd = ['pg_dump', '--format=c', '--no-owner']
        env = {}
        uri = parse_uri(config.get('database', 'uri'))
        if uri.username:
            cmd.append('--username=' + uri.username)
        if uri.hostname:
            cmd.append('--host=' + uri.hostname)
        if uri.port:
            cmd.append('--port=' + str(uri.port))
        if uri.password:
            # if db_password is set in configuration we should pass
            # an environment variable PGPASSWORD to our subprocess
            # see libpg documentation
            env['PGPASSWORD'] = uri.password
        cmd.append(database_name)

        pipe = exec_command_pipe(*tuple(cmd), env=env)
        pipe.stdin.close()
        data = pipe.stdout.read()
        res = pipe.wait()
        if res:
            raise Exception('Couldn\'t dump database!')
#......... rest of the code omitted .........
Author: kret0s, Project: tryton3_8, Lines of code: 103, Source file: database.py

Example 12: __init__

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]

#......... beginning of the code omitted .........
        if conn_dict["Port"] is None:
            self.port = '5432'
        else:
            self.port = conn_dict["Port"]

        self.database = conn_dict["Database"]
        self.user = conn_dict["User"]
        self.passwd = conn_dict["Password"]

        conn_params = "host='{host}' dbname='{db}' user='{user}' password='{passwd}' port='{port}'".format(
            host=self.host, db=self.database, user=self.user, passwd=self.passwd, port=self.port
        )

        try:
            logger.debug('creating pool')
            self.pool = ThreadedConnectionPool(int(limits["Min"]), int(limits["Max"]), conn_params)
        except Exception as e:
            logger.exception(e.message)

    def get_conn(self):
        """
        Get a connection from pool and return connection and cursor
        :return: conn, cursor
        """
        logger.debug('getting connection from pool')
        try:
            conn = self.pool.getconn()
            cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
            return conn, cursor
        except Exception as e:
            logger.exception(e.message)
            return None, None

    @staticmethod
    def execute_query(cursor, query, params):
        """
        Execute a query on database

        :param cursor: cursor object
        :param query: database query
        :type query: str
        :param params: query parameters
        :type params: tuple
        :return: query results or bool
        """
        logger.info('executing query')
        logger.debug('Cursor: {cursor}, Query: {query}'.format(
            cursor=cursor, query=query))

        try:
            if query.split()[0].lower() == 'select':
                cursor.execute(query, params)
                return cursor.fetchall()
            else:
                return cursor.execute(query, params)
        except Exception as e:
            logger.exception(e.message)
            return False

    # commit changes to db permanently
    @staticmethod
    def commit_changes(conn):
        """
        Commit changes to the database permanently

        :param conn: connection object
        :return: bool
        """
        logger.debug('committing changes to database')
        try:
            return conn.commit()
        except Exception as e:
            logger.exception(e.message)
            return False

    def put_conn(self, conn):
        """
        Put connection back to the pool

        :param conn: connection object
        :return: bool
        """
        logger.debug('putting connection {conn} back to pool'.format(conn=conn))
        try:
            return self.pool.putconn(conn)
        except Exception as e:
            logger.exception(e.message)
            return False

    def close_pool(self):
        """
        Closes connection pool
        :return: bool
        """
        logger.debug('closing connections pool')
        try:
            return self.pool.closeall()
        except Exception as e:
            logger.exception(e.message)
            return False
Author: a-wakeel, Project: pg-plain, Lines of code: 104, Source file: pg_pool.py
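
The intended call sequence for this wrapper appears to be get_conn, execute_query, commit_changes, put_conn, and finally close_pool. A hedged sketch, assuming db is an instance of the (truncated) class above:

conn, cursor = db.get_conn()           # returns (None, None) if checkout fails
if conn is not None:
    rows = db.execute_query(cursor, 'SELECT 1', ())  # SELECTs return fetchall()
    db.commit_changes(conn)            # persists INSERT/UPDATE/DELETE changes
    db.put_conn(conn)                  # hand the connection back to the pool
db.close_pool()                        # closeall() on the pool at shutdown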

Example 13: Database

# Required import: from psycopg2.pool import ThreadedConnectionPool [as alias]
# Or: from psycopg2.pool.ThreadedConnectionPool import closeall [as alias]
class Database(DatabaseInterface):

    _databases = {}
    _connpool = None
    _list_cache = None
    _list_cache_timestamp = None
    _version_cache = {}
    flavor = Flavor(ilike=True)

    def __new__(cls, name='template1'):
        if name in cls._databases:
            return cls._databases[name]
        return DatabaseInterface.__new__(cls, name=name)

    def __init__(self, name='template1'):
        super(Database, self).__init__(name=name)
        self._databases.setdefault(name, self)
        self._search_path = None
        self._current_user = None

    @classmethod
    def dsn(cls, name):
        uri = parse_uri(config.get('database', 'uri'))
        assert uri.scheme == 'postgresql'
        host = uri.hostname and "host=%s" % uri.hostname or ''
        port = uri.port and "port=%s" % uri.port or ''
        name = "dbname=%s" % name
        user = uri.username and "user=%s" % uri.username or ''
        password = ("password=%s" % urllib.unquote_plus(uri.password)
            if uri.password else '')
        return '%s %s %s %s %s' % (host, port, name, user, password)

    def connect(self):
        if self._connpool is not None:
            return self
        logger.info('connect to "%s"', self.name)
        minconn = config.getint('database', 'minconn', default=1)
        maxconn = config.getint('database', 'maxconn', default=64)
        self._connpool = ThreadedConnectionPool(
            minconn, maxconn, self.dsn(self.name))
        return self

    def get_connection(self, autocommit=False, readonly=False):
        if self._connpool is None:
            self.connect()
        conn = self._connpool.getconn()
        if autocommit:
            conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        else:
            conn.set_isolation_level(ISOLATION_LEVEL_REPEATABLE_READ)
        if readonly:
            cursor = conn.cursor()
            cursor.execute('SET TRANSACTION READ ONLY')
        conn.cursor_factory = PerfCursor
        return conn

    def put_connection(self, connection, close=False):
        self._connpool.putconn(connection, close=close)

    def close(self):
        if self._connpool is None:
            return
        self._connpool.closeall()
        self._connpool = None

    @classmethod
    def create(cls, connection, database_name):
        cursor = connection.cursor()
        cursor.execute('CREATE DATABASE "' + database_name + '" '
            'TEMPLATE template0 ENCODING \'unicode\'')
        connection.commit()
        cls._list_cache = None

    def drop(self, connection, database_name):
        cursor = connection.cursor()
        cursor.execute('DROP DATABASE "' + database_name + '"')
        Database._list_cache = None

    def get_version(self, connection):
        if self.name not in self._version_cache:
            cursor = connection.cursor()
            cursor.execute('SELECT version()')
            version, = cursor.fetchone()
            self._version_cache[self.name] = tuple(map(int,
                RE_VERSION.search(version).groups()))
        return self._version_cache[self.name]

    @staticmethod
    def dump(database_name):
        from trytond.tools import exec_command_pipe

        cmd = ['pg_dump', '--format=c', '--no-owner']
        env = {}
        uri = parse_uri(config.get('database', 'uri'))
        if uri.username:
            cmd.append('--username=' + uri.username)
        if uri.hostname:
            cmd.append('--host=' + uri.hostname)
        if uri.port:
            cmd.append('--port=' + str(uri.port))
#......... rest of the code omitted .........
Author: coopengo, Project: trytond, Lines of code: 103, Source file: database.py
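
Unlike the cursor()-based variants in earlier examples, this version hands out raw connections via get_connection/put_connection. A sketch of the expected flow (the database name is a placeholder):

db = Database('example_db').connect()   # hypothetical database name
conn = db.get_connection(readonly=True)
try:
    cur = conn.cursor()
    cur.execute('SELECT version()')
    print(cur.fetchone())
finally:
    db.put_connection(conn)
db.close()                              # closeall() on the pool, then drop it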


Note: The psycopg2.pool.ThreadedConnectionPool.closeall examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from projects contributed by the open-source community, and copyright in each fragment remains with its original author. Consult the corresponding project's license before reusing or redistributing the code, and do not republish without permission.