

Python Connection.execute method code examples

This article collects typical usage examples of the Python method s3ql.database.Connection.execute. If you are wondering what exactly Connection.execute does, how to call it, or what real-world usage looks like, the hand-picked code samples below should help. You can also explore further usage examples of the containing class, s3ql.database.Connection.


The sections below present 11 code examples of the Connection.execute method, sorted by popularity by default.
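Before turning to the examples, here is a minimal, self-contained sketch of how Connection.execute and its companion methods are typically called. The demo table and column names are invented for illustration; only the call patterns (positional "?" placeholders bound from a tuple, plus get_val, has_val, and query) mirror what the examples below actually do.

    import tempfile

    from s3ql.database import Connection

    # Connection takes the path of an SQLite database file.
    dbfile = tempfile.NamedTemporaryFile()
    db = Connection(dbfile.name)

    # execute() runs one SQL statement; "?" placeholders are bound from
    # the tuple passed as the second argument.
    db.execute('CREATE TABLE demo (id INTEGER PRIMARY KEY, name TEXT)')
    db.execute('INSERT INTO demo (id, name) VALUES (?, ?)', (1, 'foo'))
    db.execute('UPDATE demo SET name=? WHERE id=?', ('bar', 1))

    # Companion methods seen throughout the examples below:
    print(db.has_val('SELECT 1 FROM demo WHERE id=?', (1,)))     # row exists?
    print(db.get_val('SELECT name FROM demo WHERE id=?', (1,)))  # single value
    for (id_, name) in db.query('SELECT id, name FROM demo'):    # iterate rows
        print(id_, name)

    db.close()
    dbfile.close()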

Example 1: runTest

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
    def runTest(self):
        try:
            subprocess.call(['rsync', '--version'],
                            stderr=subprocess.STDOUT,
                            stdout=open('/dev/null', 'wb'))
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                raise unittest.SkipTest('rsync not installed')
            raise

        ref_dir = tempfile.mkdtemp()
        try:
            populate_dir(ref_dir)

            # Make file system and fake high inode number
            self.mkfs()
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2 ** 31 + 10, u'inodes'))
            db.close()

            # Copy source data
            self.mount()
            subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                                   self.mnt_dir + '/'])
            self.umount()

            # Check that inode watermark is high
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            self.assertGreater(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', (u'inodes',)), 2 ** 31 + 10)
            self.assertGreater(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31 + 10)
            db.close()

            # Renumber inodes
            self.fsck()

            # Check if renumbering was done
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            self.assertLess(db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?', (u'inodes',)), 2 ** 31)
            self.assertLess(db.get_val('SELECT MAX(id) FROM inodes'), 2 ** 31)
            db.close()

            # Compare
            self.mount()
            rsync = subprocess.Popen(['rsync', '-anciHAX', '--delete',
                                      '--exclude', '/lost+found',
                                      ref_dir + '/', self.mnt_dir + '/'],
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.STDOUT)
            out = rsync.communicate()[0]
            if out:
                self.fail('Copy not equal to original, rsync says:\n' + out)
            elif rsync.returncode != 0:
                self.fail('rsync failed with ' + out)

            self.umount()
        finally:
            shutil.rmtree(ref_dir)
Author: thefirstwind, Project: s3qloss, Lines: 60, Source: t5_fsck.py

Example 2: test

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
    def test(self):
        skip_without_rsync()
        ref_dir = tempfile.mkdtemp(prefix='s3ql-ref-')
        try:
            populate_dir(ref_dir)

            # Make file system and fake high inode number
            self.mkfs()
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            db.execute('UPDATE sqlite_sequence SET seq=? WHERE name=?',
                       (2 ** 31 + 10, 'inodes'))
            db.close()

            # Copy source data
            self.mount()
            subprocess.check_call(['rsync', '-aHAX', ref_dir + '/',
                                   self.mnt_dir + '/'])
            self.umount()

            # Check that inode watermark is high
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                              ('inodes',)) > 2 ** 31 + 10
            assert db.get_val('SELECT MAX(id) FROM inodes') > 2 ** 31 + 10
            db.close()

            # Renumber inodes
            self.fsck()

            # Check if renumbering was done
            db = Connection(get_backend_cachedir(self.storage_url, self.cache_dir) + '.db')
            assert db.get_val('SELECT seq FROM sqlite_sequence WHERE name=?',
                               ('inodes',)) < 2 ** 31
            assert db.get_val('SELECT MAX(id) FROM inodes') < 2 ** 31
            db.close()

            # Compare
            self.mount()
            try:
                out = check_output(['rsync', '-anciHAX', '--delete', '--exclude', '/lost+found',
                                   ref_dir + '/', self.mnt_dir + '/'], universal_newlines=True,
                                  stderr=subprocess.STDOUT)
            except CalledProcessError as exc:
                pytest.fail('rsync failed with ' + exc.output)
            if out:
                pytest.fail('Copy not equal to original, rsync says:\n' + out)

            self.umount()
        finally:
            shutil.rmtree(ref_dir)
Author: NickChen0113, Project: s3ql, Lines: 52, Source: t5_fsck.py

Example 3: main

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    options = parse_args(args)
    setup_logging(options)

    # Check for cached metadata
    cachepath = get_backend_cachedir(options.storage_url, options.cachedir)
    if not os.path.exists(cachepath + '.params'):
        raise QuietError("No local metadata found.")

    with open(cachepath + '.params', 'rb') as fh:
        param = pickle.load(fh)

    # Check revision
    if param['revision'] < CURRENT_FS_REV:
        raise QuietError('File system revision too old.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new.')

    if os.path.exists(DBNAME):
        raise QuietError('%s exists, aborting.' % DBNAME)

    log.info('Copying database...')
    dst = tempfile.NamedTemporaryFile()
    with open(cachepath + '.db', 'rb') as src:
        shutil.copyfileobj(src, dst)
    dst.flush()
    db = Connection(dst.name)

    log.info('Scrambling...')
    md5 = lambda x: hashlib.md5(x).hexdigest()
    for (id_, name) in db.query('SELECT id, name FROM names'):
        db.execute('UPDATE names SET name=? WHERE id=?',
                   (md5(name), id_))

    for (id_, name) in db.query('SELECT inode, target FROM symlink_targets'):
        db.execute('UPDATE symlink_targets SET target=? WHERE inode=?',
                   (md5(name), id_))

    for (id_, name) in db.query('SELECT rowid, value FROM ext_attributes'):
        db.execute('UPDATE ext_attributes SET value=? WHERE rowid=?',
                   (md5(name), id_))

    log.info('Saving...')
    with open(DBNAME, 'wb+') as fh:
        dump_metadata(db, fh)
Author: NickChen0113, Project: s3ql, Lines: 51, Source: scramble_db.py

Example 4: cache_tests

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
class cache_tests(unittest.TestCase):

    def setUp(self):
        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db, 0)

    def tearDown(self):
        self.cache.destroy()
        os.unlink(self.dbfile.name)

    def test_create(self):
        attrs = {'mode': 784,
                 'refcount': 3,
                 'uid': 7,
                 'gid': 2,
                 'size': 34674,
                 'rdev': 11,
                 'atime': time.time(),
                 'ctime': time.time(),
                 'mtime': time.time() }

        inode = self.cache.create_inode(**attrs)

        for key in attrs.keys():
            self.assertEqual(attrs[key], getattr(inode, key))

        self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))


    def test_del(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }
        inode = self.cache.create_inode(**attrs)
        del self.cache[inode.id]
        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
        self.assertRaises(KeyError, self.cache.__delitem__, inode.id)

    def test_get(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }

        inode = self.cache.create_inode(**attrs)
        for (key, val) in attrs.iteritems():
            self.assertEqual(getattr(inode, key), val)

        # Create another inode
        self.cache.create_inode(**attrs)

        self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,))
        # Entry should still be in cache
        self.assertEqual(inode, self.cache[inode.id])

        # Now it should be out of the cache
        for _ in xrange(inode_cache.CACHE_SIZE + 1):
            self.cache.create_inode(**attrs)

        self.assertRaises(KeyError, self.cache.__getitem__, inode.id)
Author: thefirstwind, Project: s3qloss, Lines: 80, Source: t3_inode_cache.py

Example 5: DumpTests

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
class DumpTests(unittest.TestCase):
    def setUp(self):
        self.tmpfh1 = tempfile.NamedTemporaryFile()
        self.tmpfh2 = tempfile.NamedTemporaryFile()
        self.src = Connection(self.tmpfh1.name)
        self.dst = Connection(self.tmpfh2.name)
        self.fh = tempfile.TemporaryFile()

        # Disable exclusive locking for all tests
        self.src.execute('PRAGMA locking_mode = NORMAL')
        self.dst.execute('PRAGMA locking_mode = NORMAL')

        self.create_table(self.src)
        self.create_table(self.dst)

    def tearDown(self):
        self.src.close()
        self.dst.close()
        self.tmpfh1.close()
        self.tmpfh2.close()
        self.fh.close()

    def test_transactions(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        self.dst.execute('PRAGMA journal_mode = WAL')

        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh, trx_rows=10)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_1(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_2(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, 1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_1_vals_3(self):
        self.fill_vals(self.src)
        dumpspec = (('id', deltadump.INTEGER, -1),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_auto(self):
        self.fill_vals(self.src)
        self.fill_buf(self.src)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_2_buf_fixed(self):
        BUFLEN = 32
        self.fill_vals(self.src)
        self.fill_buf(self.src, BUFLEN)
        dumpspec = (('id', deltadump.INTEGER),
                    ('buf', deltadump.BLOB, BUFLEN))
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_1(self):
        self.fill_deltas(self.src)
        dumpspec = (('id', deltadump.INTEGER, 0),)
        deltadump.dump_table(table='test', order='id', columns=dumpspec,
                             db=self.src, fh=self.fh)
        self.fh.seek(0)
        deltadump.load_table(table='test', columns=dumpspec, db=self.dst,
                             fh=self.fh)
        self.compare_tables(self.src, self.dst)

    def test_3_deltas_2(self):
#.........part of the code omitted.........
Author: DirtYiCE, Project: s3ql, Lines: 103, Source: t1_dump.py

Example 6: cache_tests

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
class cache_tests(TestCase):
    def setUp(self):

        self.bucket_dir = tempfile.mkdtemp()
        self.bucket_pool = BucketPool(lambda: local.Bucket(self.bucket_dir, None, None))

        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute(
            "INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) " "VALUES (?,?,?,?,?,?,?,?,?)",
            (
                self.inode,
                stat.S_IFREG
                | stat.S_IRUSR
                | stat.S_IWUSR
                | stat.S_IXUSR
                | stat.S_IRGRP
                | stat.S_IXGRP
                | stat.S_IROTH
                | stat.S_IXOTH,
                os.getuid(),
                os.getgid(),
                time.time(),
                time.time(),
                time.time(),
                1,
                32,
            ),
        )

        self.cache = BlockCache(self.bucket_pool, self.db, self.cachedir, self.blocksize * 100)

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        self.cache.bucket_pool = self.bucket_pool
        self.cache.destroy()
        if os.path.exists(self.cachedir):
            shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)

        llfuse.lock.release()

    @staticmethod
    def random_data(len_):
        with open("/dev/urandom", "rb") as fh:
            return fh.read(len_)

    def test_get(self):
        inode = self.inode
        blockno = 11
        data = self.random_data(int(0.5 * self.blocksize))

        # Case 1: Object does not exist yet
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            fh.write(data)

        # Case 2: Object is in cache
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

        # Case 3: Object needs to be downloaded
        self.cache.clear()
        with self.cache.get(inode, blockno) as fh:
            fh.seek(0)
            self.assertEqual(data, fh.read(len(data)))

    def test_expire(self):
        inode = self.inode

        # Define the 4 most recently accessed ones
        most_recent = [7, 11, 10, 8]
        for i in most_recent:
            time.sleep(0.2)
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # And some others
        for i in range(20):
            if i in most_recent:
                continue
            with self.cache.get(inode, i) as fh:
                fh.write("%d" % i)

        # Flush the 2 most recently accessed ones
        commit(self.cache, inode, most_recent[-2])
        commit(self.cache, inode, most_recent[-3])

#.........part of the code omitted.........
Author: netconstructor, Project: ossql, Lines: 103, Source: t2_block_cache.py

Example 7: cache_tests

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
class cache_tests(unittest.TestCase):

    def setUp(self):

        self.backend_dir = tempfile.mkdtemp(prefix='s3ql-backend-')
        self.backend_pool = BackendPool(lambda: local.Backend('local://' + self.backend_dir,
                                                           None, None))

        self.cachedir = tempfile.mkdtemp(prefix='s3ql-cache-')
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        # Create an inode we can work with
        self.inode = 42
        self.db.execute("INSERT INTO inodes (id,mode,uid,gid,mtime,atime,ctime,refcount,size) "
                        "VALUES (?,?,?,?,?,?,?,?,?)",
                        (self.inode, stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                         | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                         os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1, 32))

        cache = BlockCache(self.backend_pool, self.db, self.cachedir + "/cache",
                           self.max_obj_size * 100)
        self.cache = cache

        # Monkeypatch around the need for removal and upload threads
        cache.to_remove = DummyQueue(cache)

        class DummyDistributor:
            def put(self, arg, timeout=None):
                cache._do_upload(*arg)
                return True
        cache.to_upload = DummyDistributor()

        # Tested methods assume that they are called from
        # file system request handler
        llfuse.lock.acquire()

    def tearDown(self):
        llfuse.lock.release()
        self.cache.backend_pool = self.backend_pool
        self.cache.destroy()
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        self.dbfile.close()
        os.unlink(self.dbfile.name)

    def test_thread_hang(self):
        # Make sure that we don't deadlock if uploads threads or removal
        # threads have died and we try to expire or terminate

        # Monkeypatch to avoid error messages about uncaught exceptions
        # in other threads
        upload_exc = False
        removal_exc = False
        def _upload_loop(*a, fn=self.cache._upload_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal upload_exc
                upload_exc = True
        def _removal_loop(*a, fn=self.cache._removal_loop):
            try:
                return fn(*a)
            except NotADirectoryError:
                nonlocal removal_exc
                removal_exc = True
        self.cache._upload_loop = _upload_loop
        self.cache._removal_loop = _removal_loop

        # Start threads
        self.cache.init(threads=3)

        # Create first object (we'll try to remove that)
        with self.cache.get(self.inode, 0) as fh:
            fh.write(b'bar wurfz!')
        self.cache.commit()
        self.cache.wait()

        # Make sure that upload and removal will fail
        os.rename(self.backend_dir, self.backend_dir + '-tmp')
        open(self.backend_dir, 'w').close()

        # Create second object (we'll try to upload that)
        with self.cache.get(self.inode, 1) as fh:
            fh.write(b'bar wurfz number two!')

        # Schedule a removal
        self.cache.remove(self.inode, 0)

        try:
            # Try to clean-up (implicitly calls expire)
            with llfuse.lock_released, \
                catch_logmsg('Unable to flush cache, no upload threads left alive',
#.........part of the code omitted.........
Author: rootfs, Project: s3ql, Lines: 103, Source: t2_block_cache.py

Example 8: fsck_tests

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
class fsck_tests(TestCase):

    def setUp(self):
        self.bucket_dir = tempfile.mkdtemp()
        self.bucket = local.Bucket(self.bucket_dir, None, None)
        self.cachedir = tempfile.mkdtemp() + "/"
        self.blocksize = 1024

        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.bucket,
                  { 'blocksize': self.blocksize }, self.db)
        self.fsck.expect_errors = True

    def tearDown(self):
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.bucket_dir)

    def assert_fsck(self, fn):
        '''Check that fn detects and corrects an error'''

        self.fsck.found_errors = False
        fn()
        self.assertTrue(self.fsck.found_errors)
        self.fsck.found_errors = False
        fn()
        self.assertFalse(self.fsck.found_errors)

    def test_cache(self):
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount) "
                              "VALUES (?,?,?,?,?,?,?)",
                              (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                               | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(), 1))

        # Create new block
        fh = open(self.cachedir + '%d-1' % inode, 'wb')
        fh.write('somedata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEquals(self.bucket['s3ql_data_1'], 'somedata')

        # This should be ignored
        fh = open(self.cachedir + '%d-1' % inode, 'wb')
        fh.write('otherdata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEquals(self.bucket['s3ql_data_1'], 'somedata')

        # Existing block
        with open(self.cachedir + '%d-2' % inode, 'wb') as fh:
            fh.write('somedata')    
        self.assert_fsck(self.fsck.check_cache)
                
        # Old block preserved
        with open(self.cachedir + '%d-1' % inode, 'wb') as fh:
            fh.write('overwriting somedata')
        self.assert_fsck(self.fsck.check_cache)
        
        # Old block removed
        with open(self.cachedir + '%d-2' % inode, 'wb') as fh:
            fh.write('overwriting last piece of somedata')
        self.assert_fsck(self.fsck.check_cache)
                
        
    def test_lof1(self):

        # Make lost+found a file
        inode = self.db.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
                                (b"lost+found", ROOT_INODE))
        self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,))
        self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))

        self.assert_fsck(self.fsck.check_lof)

    def test_lof2(self):
        # Remove lost+found
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (b'lost+found',))
        self.db.execute('DELETE FROM contents WHERE name_id=? and parent_inode=?',
                        (name_id, ROOT_INODE))

        self.assert_fsck(self.fsck.check_lof)

    def test_wrong_inode_refcount(self):
    
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR,
                               0, 0, time.time(), time.time(), time.time(), 1, 0))
        self._link('name1', inode)
        self._link('name2', inode)
        self.assert_fsck(self.fsck.check_inode_refcount)

    def test_orphaned_inode(self):
        
        self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
#.........part of the code omitted.........
Author: drewlu, Project: ossql, Lines: 103, Source: t3_fsck.py

Example 9: main

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]

#.........part of the code omitted.........
        raise QuietError('File system revision too old, please run `s3qladm upgrade` first.')
    elif param['revision'] > CURRENT_FS_REV:
        raise QuietError('File system revision too new, please update your '
                         'S3QL installation.')
    
    if param['seq_no'] < seq_no:
        if bucket.is_get_consistent():
            print(textwrap.fill(textwrap.dedent('''\
                  Up to date metadata is not available. Probably the file system has not
                  been properly unmounted and you should try to run fsck on the computer 
                  where the file system has been mounted most recently.
                  ''')))
        else:
            print(textwrap.fill(textwrap.dedent('''\
                  Up to date metadata is not available. Either the file system has not
                  been unmounted cleanly or the data has not yet propagated through the backend.
                  In the later case, waiting for a while should fix the problem, in
                  the former case you should try to run fsck on the computer where
                  the file system has been mounted most recently
                  ''')))
    
        print('Enter "continue" to use the outdated data anyway:',
              '> ', sep='\n', end='')
        if options.batch:
            raise QuietError('(in batch mode, exiting)')
        if sys.stdin.readline().strip() != 'continue':
            raise QuietError()
        
        param['seq_no'] = seq_no
        param['needs_fsck'] = True
    
    
    if (not param['needs_fsck'] 
        and ((time.time() - time.timezone) - param['last_fsck'])
             < 60 * 60 * 24 * 31): # skip if last check was less than 1 month ago
        if options.force:
            log.info('File system seems clean, checking anyway.')
        else:
            log.info('File system is marked as clean. Use --force to force checking.')
            return
    
    # If using local metadata, check consistency
    if db:
        log.info('Checking DB integrity...')
        try:
            # get_list may raise CorruptError itself
            res = db.get_list('PRAGMA integrity_check(20)')
            if res[0][0] != u'ok':
                log.error('\n'.join(x[0] for x in res ))
                raise apsw.CorruptError()
        except apsw.CorruptError:
            raise QuietError('Local metadata is corrupted. Remove or repair the following '
                             'files manually and re-run fsck:\n'
                             + cachepath + '.db (corrupted)\n'
                             + cachepath + '.param (intact)')
    else:
        log.info("Downloading & uncompressing metadata...")
        os.close(os.open(cachepath + '.db.tmp', os.O_RDWR | os.O_CREAT | os.O_TRUNC,
                         stat.S_IRUSR | stat.S_IWUSR)) 
        db = Connection(cachepath + '.db.tmp', fast_mode=True)
        with bucket.open_read("s3ql_metadata") as fh:
            restore_metadata(fh, db)
        db.close()
        os.rename(cachepath + '.db.tmp', cachepath + '.db')
        db = Connection(cachepath + '.db')
    
    # Increase metadata sequence no 
    param['seq_no'] += 1
    param['needs_fsck'] = True
    bucket['s3ql_seq_no_%d' % param['seq_no']] = 'Empty'
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
    
    fsck = Fsck(cachepath + '-cache', bucket, param, db)
    fsck.check()
    
    if fsck.uncorrectable_errors:
        raise QuietError("Uncorrectable errors found, aborting.")
        
    if os.path.exists(cachepath + '-cache'):
        os.rmdir(cachepath + '-cache')
        
    log.info('Saving metadata...')
    fh = tempfile.TemporaryFile()
    dump_metadata(fh, db)  
            
    log.info("Compressing & uploading metadata..")
    cycle_metadata(bucket)
    fh.seek(0)
    param['needs_fsck'] = False
    param['last_fsck'] = time.time() - time.timezone
    param['last-modified'] = time.time() - time.timezone
    with bucket.open_write("s3ql_metadata", param) as dst:
        fh.seek(0)
        shutil.copyfileobj(fh, dst)
    fh.close()
    pickle.dump(param, open(cachepath + '.params', 'wb'), 2)
        
    db.execute('ANALYZE')
    db.execute('VACUUM')
    db.close() 
Author: drewlu, Project: ossql, Lines: 104, Source: fsck.py

Example 10: fsck_tests

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
class fsck_tests(unittest.TestCase):

    def setUp(self):
        self.backend_dir = tempfile.mkdtemp()
        self.backend = local.Backend('local://' + self.backend_dir, None, None)
        self.cachedir = tempfile.mkdtemp()
        self.max_obj_size = 1024

        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name. 
        # Therefore, we unlink the file manually in tearDown() 
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)
        
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)

        self.fsck = Fsck(self.cachedir, self.backend,
                  { 'max_obj_size': self.max_obj_size }, self.db)
        self.fsck.expect_errors = True

    def tearDown(self):
        shutil.rmtree(self.cachedir)
        shutil.rmtree(self.backend_dir)
        os.unlink(self.dbfile.name)

    def assert_fsck(self, fn):
        '''Check that fn detects and corrects an error'''

        self.fsck.found_errors = False
        fn()
        self.assertTrue(self.fsck.found_errors)
        self.fsck.found_errors = False
        self.fsck.check()
        self.assertFalse(self.fsck.found_errors)

    def test_cache(self):
        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
                              "VALUES (?,?,?,?,?,?,?,?)",
                              (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                               | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
                               os.getuid(), os.getgid(), time.time(), time.time(), time.time(),
                               1, 8))
        self._link('test-entry', inode)

        # Create new block
        fh = open(self.cachedir + '/%d-0' % inode, 'wb')
        fh.write('somedata')
        fh.close()
        self.assert_fsck(self.fsck.check_cache)
        self.assertEquals(self.backend['s3ql_data_1'], 'somedata')

        # Existing block
        self.db.execute('UPDATE inodes SET size=? WHERE id=?',
                        (self.max_obj_size + 8, inode))
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write('somedata')
        self.assert_fsck(self.fsck.check_cache)

        # Old block preserved
        with open(self.cachedir + '/%d-0' % inode, 'wb') as fh:
            fh.write('somedat2')
        self.assert_fsck(self.fsck.check_cache)

        # Old block removed
        with open(self.cachedir + '/%d-1' % inode, 'wb') as fh:
            fh.write('somedat3')
        self.assert_fsck(self.fsck.check_cache)


    def test_lof1(self):

        # Make lost+found a file
        inode = self.db.get_val("SELECT inode FROM contents_v WHERE name=? AND parent_inode=?",
                                (b"lost+found", ROOT_INODE))
        self.db.execute('DELETE FROM contents WHERE parent_inode=?', (inode,))
        self.db.execute('UPDATE inodes SET mode=?, size=? WHERE id=?',
                        (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR, 0, inode))

        def check():
            self.fsck.check_lof()
            self.fsck.check_inodes_refcount()

        self.assert_fsck(check)

    def test_lof2(self):
        # Remove lost+found
        name_id = self.db.get_val('SELECT id FROM names WHERE name=?', (b'lost+found',))
        inode = self.db.get_val('SELECT inode FROM contents WHERE name_id=? AND '
                                'parent_inode=?', (name_id, ROOT_INODE))
        self.db.execute('DELETE FROM inodes WHERE id=?', (inode,))
        self.db.execute('DELETE FROM contents WHERE name_id=? and parent_inode=?',
                        (name_id, ROOT_INODE))
        self.db.execute('UPDATE names SET refcount = refcount-1 WHERE id=?', (name_id,))

        self.assert_fsck(self.fsck.check_lof)

    def test_wrong_inode_refcount(self):

        inode = self.db.rowid("INSERT INTO inodes (mode,uid,gid,mtime,atime,ctime,refcount,size) "
#.........part of the code omitted.........
Author: thefirstwind, Project: s3qloss, Lines: 103, Source: t3_fsck.py

Example 11: cache_tests

# Required import: from s3ql.database import Connection [as alias]
# Or: from s3ql.database.Connection import execute [as alias]
class cache_tests(TestCase):

    def setUp(self):
        self.dbfile = tempfile.NamedTemporaryFile()
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db)

    def tearDown(self):
        self.cache.destroy()

    def test_create(self):
        attrs = {'mode': 784,
                 'refcount': 3,
                 'uid': 7,
                 'gid': 2,
                 'size': 34674,
                 'rdev': 11,
                 'atime': time.time(),
                 'ctime': time.time(),
                 'mtime': time.time() }

        inode = self.cache.create_inode(**attrs)

        for key in attrs.keys():
            self.assertEqual(attrs[key], getattr(inode, key))

        self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))


    def test_del(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }
        inode = self.cache.create_inode(**attrs)
        del self.cache[inode.id]
        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
        self.assertRaises(KeyError, self.cache.__delitem__, inode.id)

    def test_get(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime': time.time(),
                'ctime': time.time(),
                'mtime': time.time() }
        
        inode = self.cache.create_inode(**attrs)
        for (key, val) in attrs.iteritems():
            self.assertEqual(getattr(inode, key), val)

        # Create another inode
        self.cache.create_inode(**attrs)
        
        self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,))
        # Entry should still be in cache
        self.assertEqual(inode, self.cache[inode.id])

        # Now it should be out of the cache
        for _ in xrange(inode_cache.CACHE_SIZE + 1):
            self.cache.create_inode(**attrs)

        self.assertRaises(KeyError, self.cache.__getitem__, inode.id)
Author: drewlu, Project: ossql, Lines: 75, Source: t3_inode_cache.py


Note: The s3ql.database.Connection.execute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their respective developers. Copyright of the source code remains with the original authors; consult each project's License before redistributing or reusing it. Do not reproduce without permission.