This page collects typical usage examples of the P2ALIGN macro in C/C++ code. If you are wondering exactly how P2ALIGN is used, what it does, or what real call sites look like, the hand-picked examples below should help.
The following presents 15 code examples of P2ALIGN, sorted by popularity by default.
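For readers unfamiliar with the macro: P2ALIGN comes from the Solaris/illumos header <sys/sysmacros.h> and rounds a value down to the nearest multiple of a power-of-two alignment. Before the real examples, here is a minimal user-space sketch of the macro family used throughout this page; the definitions are written to mirror the classic illumos ones, but treat this as an illustrative sketch rather than a drop-in replacement.
#include <stdint.h>
#include <stdio.h>

/* Round x down to the nearest multiple of align (align must be a power of two). */
#define P2ALIGN(x, align)    ((x) & -(align))
/* Round x up to the nearest multiple of align. */
#define P2ROUNDUP(x, align)  (-(-(x) & -(align)))
/* Offset of x within its align-sized block. */
#define P2PHASE(x, align)    ((x) & ((align) - 1))

int
main(void)
{
    uint64_t off = 0x12345;

    /* 0x12345 aligned down to 0x1000 is 0x12000; up is 0x13000; phase is 0x345. */
    printf("down %#llx up %#llx phase %#llx\n",
        (unsigned long long)P2ALIGN(off, 0x1000),
        (unsigned long long)P2ROUNDUP(off, 0x1000),
        (unsigned long long)P2PHASE(off, 0x1000));
    return (0);
}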
Example 1: pci_cfgacc_map
static caddr_t
pci_cfgacc_map(paddr_t phys_addr)
{
#ifdef __xpv
phys_addr = pfn_to_pa(xen_assign_pfn(mmu_btop(phys_addr))) |
(phys_addr & MMU_PAGEOFFSET);
#endif
if (khat_running) {
pfn_t pfn = mmu_btop(phys_addr);
/*
* pci_cfgacc_virt_base may hold address left from early
* boot, which points to low mem. Realloc virtual address
* in kernel space since it's already late in boot now.
* Note: no need to unmap first, clear_boot_mappings() will
* do that for us.
*/
if (pci_cfgacc_virt_base < (caddr_t)kernelbase)
pci_cfgacc_virt_base = vmem_alloc(heap_arena,
MMU_PAGESIZE, VM_SLEEP);
hat_devload(kas.a_hat, pci_cfgacc_virt_base,
MMU_PAGESIZE, pfn, PROT_READ | PROT_WRITE |
HAT_STRICTORDER, HAT_LOAD_LOCK);
} else {
paddr_t pa_base = P2ALIGN(phys_addr, MMU_PAGESIZE);
if (pci_cfgacc_virt_base == NULL)
pci_cfgacc_virt_base =
(caddr_t)alloc_vaddr(MMU_PAGESIZE, MMU_PAGESIZE);
kbm_map((uintptr_t)pci_cfgacc_virt_base, pa_base, 0, 0);
}
return (pci_cfgacc_virt_base + (phys_addr & MMU_PAGEOFFSET));
}
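The pattern above, computing a page-aligned base with P2ALIGN and adding back the low bits with MMU_PAGEOFFSET, is the standard way to map an arbitrary physical address one page at a time. A standalone sketch of the same split, with an assumed 4 KB page size standing in for MMU_PAGESIZE/MMU_PAGEOFFSET:
#include <stdint.h>
#include <stdio.h>

#define PAGESIZE    4096ULL             /* assumed page size */
#define PAGEOFFSET  (PAGESIZE - 1)      /* mask for the offset within a page */
#define P2ALIGN(x, align) ((x) & -(align))

int
main(void)
{
    uint64_t phys_addr = 0xfee003f8ULL;                 /* arbitrary example address */
    uint64_t pa_base = P2ALIGN(phys_addr, PAGESIZE);    /* page to map */
    uint64_t offset  = phys_addr & PAGEOFFSET;          /* byte within that page */

    /* A real driver would map pa_base and then access mapping + offset. */
    printf("base %#llx offset %#llx\n",
        (unsigned long long)pa_base, (unsigned long long)offset);
    return (0);
}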
Example 2: vdev_cache_read
/*
* Read data from the cache. Returns 0 on cache hit, errno on a miss.
*/
int
vdev_cache_read(zio_t *zio)
{
vdev_cache_t *vc = &zio->io_vd->vdev_cache;
vdev_cache_entry_t *ve, *ve_search;
uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);)
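Here P2ALIGN and P2PHASE split the I/O offset into the start of the containing cache block and the byte offset within that block (VCBS is the vdev cache block size). A minimal sketch of that split, with an assumed 64 KB block size:
#include <stdint.h>
#include <stdio.h>

#define P2ALIGN(x, align) ((x) & -(align))
#define P2PHASE(x, align) ((x) & ((align) - 1))

/* Assumed cache block size; in the code above this is VCBS. */
#define CACHE_BLOCK (1ULL << 16)

int
main(void)
{
    uint64_t io_offset = 0x123456ULL;
    uint64_t cache_offset = P2ALIGN(io_offset, CACHE_BLOCK);  /* block start */
    uint64_t cache_phase  = P2PHASE(io_offset, CACHE_BLOCK);  /* byte within block */

    printf("block %#llx phase %#llx\n",
        (unsigned long long)cache_offset, (unsigned long long)cache_phase);
    return (0);
}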
Example 3: plat_get_mem_unum
/*ARGSUSED*/
int
plat_get_mem_unum(int synd_code, uint64_t flt_addr, int flt_bus_id,
int flt_in_memory, ushort_t flt_status, char *buf, int buflen, int *lenp)
{
if (flt_in_memory && (p2get_mem_unum != NULL))
return (p2get_mem_unum(synd_code, P2ALIGN(flt_addr, 8),
buf, buflen, lenp));
else
return (ENOTSUP);
}
Example 4: address_in_range
/*
* Check whether any portion of [start, end] segment is within the
* [start_addr, end_addr] range.
*
* Return values:
* 0 - address is outside the range
* 1 - address is within the range
*/
static int
address_in_range(uintptr_t start, uintptr_t end, size_t psz)
{
int rc = 1;
/*
* Nothing to do if there is no address range specified with -A
*/
if (start_addr != INVALID_ADDRESS || end_addr != INVALID_ADDRESS) {
/* The segment end is below the range start */
if ((start_addr != INVALID_ADDRESS) &&
(end < P2ALIGN(start_addr, psz)))
rc = 0;
/* The segment start is above the range end */
if ((end_addr != INVALID_ADDRESS) &&
(start > P2ALIGN(end_addr + psz, psz)))
rc = 0;
}
return (rc);
}
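The alignment here widens the user-supplied [start_addr, end_addr] window to page boundaries before comparing it with the segment, so a segment that merely shares a page with the window still counts as inside. A small self-contained version of the same test, with the window passed as explicit parameters (lo/hi are hypothetical names, not from the original code):
#include <stdint.h>
#include <stddef.h>

#define P2ALIGN(x, align) ((x) & -(align))

/*
 * Return 1 if the segment [start, end] overlaps the page-rounded window
 * around [lo, hi], 0 otherwise.  Mirrors the check above.
 */
static int
range_overlaps(uintptr_t start, uintptr_t end,
    uintptr_t lo, uintptr_t hi, size_t psz)
{
    if (end < P2ALIGN(lo, psz))
        return (0);     /* segment ends below the window */
    if (start > P2ALIGN(hi + psz, psz))
        return (0);     /* segment starts above the window */
    return (1);
}

int
main(void)
{
    /* A segment in the same 4 KB page as the window start still matches. */
    return (!range_overlaps(0x2000, 0x27ff, 0x2800, 0x3800, 0x1000));
}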
Example 5: zvol_discard
static int
zvol_discard(struct bio *bio)
{
zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
uint64_t start = BIO_BI_SECTOR(bio) << 9;
uint64_t size = BIO_BI_SIZE(bio);
uint64_t end = start + size;
int error;
rl_t *rl;
dmu_tx_t *tx;
ASSERT(zv && zv->zv_open_count > 0);
if (end > zv->zv_volsize)
return (SET_ERROR(EIO));
/*
* Align the request to volume block boundaries when REQ_SECURE is
* available, but not requested. If we don't, then this will force
* dnode_free_range() to zero out the unaligned parts, which is slow
* (read-modify-write) and useless since we are not freeing any space
* by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
* 2.6.35) will not receive this optimization.
*/
#ifdef REQ_SECURE
if (!(bio->bi_rw & REQ_SECURE)) {
start = P2ROUNDUP(start, zv->zv_volblocksize);
end = P2ALIGN(end, zv->zv_volblocksize);
size = end - start;
}
#endif
if (start >= end)
return (0);
rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);
tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, start, size, B_TRUE);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset,
ZVOL_OBJ, start, size);
}
zfs_range_unlock(rl);
return (error);
}
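The P2ROUNDUP/P2ALIGN pair in this example trims the discard request inward so that only whole volume blocks are freed; partial blocks at either end are skipped rather than read-modify-written. A standalone sketch of that trimming step, assuming an 8 KB volume block size for the numbers:
#include <stdint.h>
#include <stdio.h>

#define P2ALIGN(x, align)   ((x) & -(align))
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))

int
main(void)
{
    uint64_t volblocksize = 8192;           /* assumed volume block size */
    uint64_t start = 10000, end = 50000;    /* byte range to discard */

    /* Shrink the range to whole blocks: [16384, 49152) in this example. */
    start = P2ROUNDUP(start, volblocksize);
    end = P2ALIGN(end, volblocksize);

    if (start >= end)
        printf("nothing left to free\n");
    else
        printf("free [%llu, %llu), %llu bytes\n",
            (unsigned long long)start, (unsigned long long)end,
            (unsigned long long)(end - start));
    return (0);
}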
Example 6: zvol_discard
static int
zvol_discard(struct bio *bio)
{
zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
uint64_t start = BIO_BI_SECTOR(bio) << 9;
uint64_t size = BIO_BI_SIZE(bio);
uint64_t end = start + size;
int error;
rl_t *rl;
dmu_tx_t *tx;
ASSERT(zv && zv->zv_open_count > 0);
if (end > zv->zv_volsize)
return (SET_ERROR(EIO));
/*
* Align the request to volume block boundaries when a secure erase is
* not required. This will prevent dnode_free_range() from zeroing out
* the unaligned parts which is slow (read-modify-write) and useless
* since we are not freeing any space by doing so.
*/
if (!bio_is_secure_erase(bio)) {
start = P2ROUNDUP(start, zv->zv_volblocksize);
end = P2ALIGN(end, zv->zv_volblocksize);
size = end - start;
}
if (start >= end)
return (0);
rl = zfs_range_lock(&zv->zv_range_lock, start, size, RL_WRITER);
tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zvol_log_truncate(zv, tx, start, size, B_TRUE);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset,
ZVOL_OBJ, start, size);
}
zfs_range_unlock(rl);
return (error);
}
Example 7: copy_memlist_filter
/*
* Copy in a memory list from boot to kernel, with a filter function
* to remove pages. The filter function can increase the address and/or
* decrease the size to filter out pages. It will also align addresses and
* sizes to PAGESIZE.
*/
void
copy_memlist_filter(
struct memlist *src,
struct memlist **dstp,
void (*filter)(uint64_t *, uint64_t *))
{
struct memlist *dst, *prev;
uint64_t addr;
uint64_t size;
uint64_t eaddr;
dst = *dstp;
prev = dst;
/*
* Move through the memlist applying a filter against
* each range of memory. Note that we may apply the
* filter multiple times against each memlist entry.
*/
for (; src; src = src->ml_next) {
addr = P2ROUNDUP(src->ml_address, PAGESIZE);
eaddr = P2ALIGN(src->ml_address + src->ml_size, PAGESIZE);
while (addr < eaddr) {
size = eaddr - addr;
if (filter != NULL)
filter(&addr, &size);
if (size == 0)
break;
dst->ml_address = addr;
dst->ml_size = size;
dst->ml_next = 0;
if (prev == dst) {
dst->ml_prev = 0;
dst++;
} else {
dst->ml_prev = prev;
prev->ml_next = dst;
dst++;
prev++;
}
addr += size;
}
}
*dstp = dst;
}
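copy_memlist_filter() first rounds each source range inward to page boundaries and then lets the filter callback carve pieces out of it. As a hypothetical illustration only (the cutoff and the function name are not from the original source), a filter that drops everything below 1 MB could look like this; it matches the callback signature used above:
#include <stdint.h>
#include <assert.h>

/*
 * Hypothetical filter: advance the range past the first 1 MB.  The caller may
 * apply it repeatedly, so it only needs to handle the start of the current chunk.
 */
static void
skip_low_mem_filter(uint64_t *addrp, uint64_t *sizep)
{
    const uint64_t cutoff = 1ULL << 20;     /* illustrative 1 MB cutoff */

    if (*addrp >= cutoff)
        return;                     /* nothing to trim */
    if (*addrp + *sizep <= cutoff) {
        *sizep = 0;                 /* range is entirely below the cutoff */
        return;
    }
    *sizep -= cutoff - *addrp;      /* drop the low part ... */
    *addrp = cutoff;                /* ... and start at the cutoff */
}

int
main(void)
{
    uint64_t addr = 0x80000, size = 0x200000;   /* 512 KB .. 2.5 MB */

    skip_low_mem_filter(&addr, &size);
    assert(addr == (1ULL << 20) && size == 0x180000);
    /* In the kernel this would instead be passed to copy_memlist_filter(). */
    return (0);
}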
Example 8: zvol_discard
static void
zvol_discard(void *arg)
{
struct request *req = (struct request *)arg;
struct request_queue *q = req->q;
zvol_state_t *zv = q->queuedata;
fstrans_cookie_t cookie = spl_fstrans_mark();
uint64_t start = blk_rq_pos(req) << 9;
uint64_t end = start + blk_rq_bytes(req);
int error;
rl_t *rl;
if (end > zv->zv_volsize) {
error = EIO;
goto out;
}
/*
* Align the request to volume block boundaries. If we don't,
* then this will force dnode_free_range() to zero out the
* unaligned parts, which is slow (read-modify-write) and
* useless since we are not freeing any space by doing so.
*/
start = P2ROUNDUP(start, zv->zv_volblocksize);
end = P2ALIGN(end, zv->zv_volblocksize);
if (start >= end) {
error = 0;
goto out;
}
rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end-start);
/*
* TODO: maybe we should add the operation to the log.
*/
zfs_range_unlock(rl);
out:
blk_end_request(req, -error, blk_rq_bytes(req));
spl_fstrans_unmark(cookie);
}
Example 9: zvol_discard
static int
zvol_discard(struct bio *bio)
{
zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
uint64_t start = BIO_BI_SECTOR(bio) << 9;
uint64_t size = BIO_BI_SIZE(bio);
uint64_t end = start + size;
int error;
rl_t *rl;
if (end > zv->zv_volsize)
return (SET_ERROR(EIO));
/*
* Align the request to volume block boundaries when REQ_SECURE is
* available, but not requested. If we don't, then this will force
* dnode_free_range() to zero out the unaligned parts, which is slow
* (read-modify-write) and useless since we are not freeing any space
* by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
* 2.6.35) will not receive this optimization.
*/
#ifdef REQ_SECURE
if (!(bio->bi_rw & REQ_SECURE)) {
start = P2ROUNDUP(start, zv->zv_volblocksize);
end = P2ALIGN(end, zv->zv_volblocksize);
size = end - start;
}
#endif
if (start >= end)
return (0);
rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, size);
/*
* TODO: maybe we should add the operation to the log.
*/
zfs_range_unlock(rl);
return (error);
}
Example 10: fletcher_4_byteswap
void
fletcher_4_byteswap(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
const fletcher_4_ops_t *ops;
uint64_t p2size = P2ALIGN(size, 64);
ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));
if (size == 0) {
ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
} else if (p2size == 0) {
ops = &fletcher_4_scalar_ops;
fletcher_4_byteswap_impl(ops, buf, size, zcp);
} else {
ops = fletcher_4_impl_get();
fletcher_4_byteswap_impl(ops, buf, p2size, zcp);
if (p2size < size)
fletcher_4_incremental_byteswap((char *)buf + p2size,
size - p2size, zcp);
}
}
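The checksum routine above (and the variant in the next example) uses P2ALIGN(size, 64) to split the buffer into a 64-byte-aligned bulk region for the fast implementation and a short tail for the scalar fallback. A self-contained sketch of that bulk-plus-tail split, with a trivial byte sum standing in for the real checksum passes:
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

#define P2ALIGN(x, align) ((x) & -(align))

/* Stand-in for both the "fast bulk" and "scalar tail" passes of a real checksum. */
static uint64_t
sum_bytes(const uint8_t *buf, size_t len, uint64_t acc)
{
    for (size_t i = 0; i < len; i++)
        acc += buf[i];
    return (acc);
}

int
main(void)
{
    uint8_t buf[200];
    for (size_t i = 0; i < sizeof (buf); i++)
        buf[i] = (uint8_t)i;

    size_t size = sizeof (buf);
    size_t p2size = P2ALIGN(size, 64);      /* 192: largest 64-byte multiple */

    /* Bulk pass over the aligned prefix, then the remaining tail bytes. */
    uint64_t acc = sum_bytes(buf, p2size, 0);
    if (p2size < size)
        acc = sum_bytes(buf + p2size, size - p2size, acc);

    printf("sum %llu\n", (unsigned long long)acc);
    return (0);
}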
Example 11: fletcher_4_byteswap
/*ARGSUSED*/
void
fletcher_4_byteswap(const void *buf, uint64_t size,
const void *ctx_template, zio_cksum_t *zcp)
{
const uint64_t p2size = P2ALIGN(size, 64);
ASSERT(IS_P2ALIGNED(size, sizeof (uint32_t)));
if (size == 0 || p2size == 0) {
ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
if (size > 0)
fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
buf, size);
} else {
fletcher_4_byteswap_impl(buf, p2size, zcp);
if (p2size < size)
fletcher_4_scalar_byteswap((fletcher_4_ctx_t *)zcp,
(char *)buf + p2size, size - p2size);
}
}
Example 12: dmu_buf_hold_array_by_dnode
/*
* Note: longer-term, we should modify all of the dmu_buf_*() interfaces
* to take a held dnode rather than <os, object> -- the lookup is wasteful,
* and can induce severe lock contention when writing to several files
* whose dnodes are in the same block.
*/
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
dmu_buf_t **dbp;
uint64_t blkid, nblks, i;
uint32_t dbuf_flags;
int err;
zio_t *zio;
ASSERT(length <= DMU_MAX_ACCESS);
dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT;
if (flags & DMU_READ_NO_PREFETCH || length > zfetch_array_rd_sz)
dbuf_flags |= DB_RF_NOPREFETCH;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_datablkshift) {
int blkshift = dn->dn_datablkshift;
nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
} else {
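The nblks expression above counts how many data blocks the byte range [offset, offset + length) touches: round the end up and the start down to block boundaries, subtract, and shift by the block shift. A worked sketch of the same arithmetic, using an assumed 128 KB block size:
#include <stdint.h>
#include <stdio.h>

#define P2ALIGN(x, align)   ((x) & -(align))
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))

int
main(void)
{
    int blkshift = 17;                  /* assumed 128 KB data blocks */
    uint64_t blksz = 1ULL << blkshift;
    uint64_t offset = 100000, length = 300000;

    /*
     * Blocks spanned by [offset, offset + length):
     * round the end up and the start down, then divide by the block size.
     * Here that is (524288 - 0) >> 17 = 4 blocks.
     */
    uint64_t nblks = (P2ROUNDUP(offset + length, blksz) -
        P2ALIGN(offset, blksz)) >> blkshift;

    printf("%llu blocks\n", (unsigned long long)nblks);
    return (0);
}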
Example 13: kgrep_range_basic
static int
kgrep_range_basic(uintptr_t base, uintptr_t lim, void *kg_arg)
{
kgrep_data_t *kg = kg_arg;
size_t pagesize = kg->kg_pagesize;
uintptr_t pattern = kg->kg_pattern;
uintptr_t *page = kg->kg_page;
uintptr_t *page_end = &page[pagesize / sizeof (uintptr_t)];
uintptr_t *pos;
uintptr_t addr, offset;
int seen = 0;
/*
* page-align everything, to simplify the loop
*/
base = P2ALIGN(base, pagesize);
lim = P2ROUNDUP(lim, pagesize);
for (addr = base; addr < lim; addr += pagesize) {
if (mdb_vread(page, pagesize, addr) == -1)
continue;
seen = 1;
for (pos = page; pos < page_end; pos++) {
if (*pos != pattern)
continue;
offset = (caddr_t)pos - (caddr_t)page;
kgrep_cb(addr + offset, NULL, kg->kg_cbtype);
}
}
if (seen)
kg->kg_seen = 1;
return (WALK_NEXT);
}
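kgrep_range_basic() widens the search window outward, rounding the base down and the limit up, so the loop can read whole pages. A minimal sketch of that outward rounding and the resulting page-by-page loop bounds, with an assumed 4 KB page size:
#include <stdint.h>
#include <stdio.h>

#define P2ALIGN(x, align)   ((x) & -(align))
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))

int
main(void)
{
    size_t pagesize = 4096;             /* assumed page size */
    uintptr_t base = 0x1234, lim = 0x5678;

    /* Widen [base, lim) outward to whole pages: [0x1000, 0x6000). */
    base = P2ALIGN(base, pagesize);
    lim = P2ROUNDUP(lim, pagesize);

    for (uintptr_t addr = base; addr < lim; addr += pagesize)
        printf("would scan page at %#lx\n", (unsigned long)addr);
    return (0);
}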
Example 14: dmu_buf_hold_array_by_dnode
/*
* Note: longer-term, we should modify all of the dmu_buf_*() interfaces
* to take a held dnode rather than <os, object> -- the lookup is wasteful,
* and can induce severe lock contention when writing to several files
* whose dnodes are in the same block.
*/
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset,
uint64_t length, int read, const void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
dsl_pool_t *dp = NULL;
dmu_buf_t **dbp;
uint64_t blkid, nblks, i;
uint32_t flags;
int err;
zio_t *zio;
hrtime_t start;
ASSERT(length <= DMU_MAX_ACCESS);
flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT;
if (length > zfetch_array_rd_sz)
flags |= DB_RF_NOPREFETCH;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_datablkshift) {
int blkshift = dn->dn_datablkshift;
nblks = (P2ROUNDUP(offset+length, 1ULL<<blkshift) -
P2ALIGN(offset, 1ULL<<blkshift)) >> blkshift;
} else {
Example 15: vdev_cache_allocate
/*
* Allocate an entry in the cache. At the point we don't have the data,
* we're just creating a placeholder so that multiple threads don't all
* go off and read the same blocks.
*/
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
vdev_cache_t *vc = &zio->io_vd->vdev_cache;
uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
vdev_cache_entry_t *ve;
ASSERT(MUTEX_HELD(&vc->vc_lock));
if (zfs_vdev_cache_size == 0)
return (NULL);
/*
* If adding a new entry would exceed the cache size,
* evict the oldest entry (LRU).
*/
if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
zfs_vdev_cache_size) {
ve = avl_first(&vc->vc_lastused_tree);
if (ve->ve_fill_io != NULL)
return (NULL);
ASSERT3U(ve->ve_hits, !=, 0);
vdev_cache_evict(vc, ve);
}