This article collects typical usage examples of the page_address function as it appears in C code from the Linux kernel. If you are wondering what page_address does, how to use it, or want to see it in context, the hand-picked examples below should help.
Fifteen code examples of page_address are shown below, sorted by popularity.
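Before the examples, here is a minimal, hedged sketch (not taken from any example below; the helper name and flow are purely illustrative) of what page_address does: it returns the kernel virtual address of a struct page that has a permanent kernel mapping, so the page can be used like ordinary memory.

/* Illustrative sketch: allocate a lowmem page, touch it through its kernel
 * virtual address, then release it. Highmem pages have no permanent mapping
 * and would need kmap()/kunmap() instead of a bare page_address(). */
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static int page_address_demo(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *vaddr;

	if (!page)
		return -ENOMEM;

	vaddr = page_address(page);	/* valid: GFP_KERNEL pages are always mapped */
	memset(vaddr, 0, PAGE_SIZE);	/* use the page like ordinary memory */

	__free_page(page);
	return 0;
}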
Example 1: __kernel_map_pages
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
Example 2: netbk_gop_frag_copy
/*
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
*/
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
unsigned long offset, int *head)
{
struct gnttab_copy *copy_gop;
struct netbk_rx_meta *meta;
/*
* These variables are used iff get_page_ext returns true,
* in which case they are guaranteed to be initialized.
*/
unsigned int uninitialized_var(group), uninitialized_var(idx);
int foreign = get_page_ext(page, &group, &idx);
unsigned long bytes;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
meta = npo->meta + npo->meta_prod - 1;
/* Skip unused frames from start of page */
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
while (size > 0) {
BUG_ON(offset >= PAGE_SIZE);
BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
/*
* Netfront requires there to be some data in the head
* buffer.
*/
BUG_ON(*head);
meta = get_next_rx_buffer(vif, npo);
}
if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - npo->copy_off;
copy_gop = npo->copy + npo->copy_prod++;
copy_gop->flags = GNTCOPY_dest_gref;
if (foreign) {
struct xen_netbk *netbk = &xen_netbk[group];
struct pending_tx_info *src_pend;
src_pend = &netbk->pending_tx_info[idx];
copy_gop->source.domid = src_pend->vif->domid;
copy_gop->source.u.ref = src_pend->req.gref;
copy_gop->flags |= GNTCOPY_source_gref;
} else {
void *vaddr = page_address(page);
copy_gop->source.domid = DOMID_SELF;
copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
}
copy_gop->source.offset = offset;
copy_gop->dest.domid = vif->domid;
copy_gop->dest.offset = npo->copy_off;
copy_gop->dest.u.ref = npo->copy_gref;
copy_gop->len = bytes;
npo->copy_off += bytes;
meta->size += bytes;
offset += bytes;
size -= bytes;
/* Next frame */
if (offset == PAGE_SIZE && size) {
BUG_ON(!PageCompound(page));
page++;
offset = 0;
}
/* Leave a gap for the GSO descriptor. */
if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
vif->rx.req_cons++;
*head = 0; /* There must be something in this buffer now. */
}
}
Example 3: ip6_append_data
//......... part of the code omitted .........
/*
* Find where to start putting bytes
*/
data = skb_put(skb, fraglen);
skb_set_network_header(skb, exthdrlen);
data += fragheaderlen;
skb->transport_header = (skb->network_header +
fragheaderlen);
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
copy = datalen - transhdrlen - fraggap;
if (copy < 0) {
err = -EINVAL;
kfree_skb(skb);
goto error;
} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
}
offset += copy;
length -= datalen - fraggap;
transhdrlen = 0;
exthdrlen = 0;
dst_exthdrlen = 0;
/*
* Put the packet on the pending queue
*/
__skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
if (copy > length)
copy = length;
if (!(rt->dst.dev->features&NETIF_F_SG)) {
unsigned int off;
off = skb->len;
if (getfrag(from, skb_put(skb, copy),
offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
}
} else {
int i = skb_shinfo(skb)->nr_frags;
struct page_frag *pfrag = sk_page_frag(sk);
err = -ENOMEM;
if (!sk_page_frag_refill(sk, pfrag))
goto error;
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
err = -EMSGSIZE;
if (i == MAX_SKB_FRAGS)
goto error;
__skb_fill_page_desc(skb, i, pfrag->page,
pfrag->offset, 0);
skb_shinfo(skb)->nr_frags = ++i;
get_page(pfrag->page);
}
copy = min_t(int, copy, pfrag->size - pfrag->offset);
if (getfrag(from,
page_address(pfrag->page) + pfrag->offset,
offset, copy, skb->len, skb) < 0)
goto error_efault;
pfrag->offset += copy;
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
}
offset += copy;
length -= copy;
}
return 0;
error_efault:
err = -EFAULT;
error:
cork->length -= length;
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
return err;
}
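In Example 3, page_address(pfrag->page) + pfrag->offset turns the socket's current page fragment into a plain kernel pointer that getfrag() can copy into. A hedged sketch of that fill pattern, with a made-up helper name (fill_page_frag is not a kernel API) and without the skb bookkeeping:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical helper: copy len bytes into the current page fragment through
 * its kernel mapping and advance the offset, mirroring the page_address()
 * use in ip6_append_data(). */
static int fill_page_frag(struct page_frag *pfrag, const void *src, size_t len)
{
	if (len > pfrag->size - pfrag->offset)
		return -ENOSPC;	/* caller would refill the fragment first */
	memcpy(page_address(pfrag->page) + pfrag->offset, src, len);
	pfrag->offset += len;
	return 0;
}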
Example 4: flush_icache_page
/*
* This is called when a page-cache page is about to be mapped into a
* user process' address space. It offers an opportunity for a
* port to ensure d-cache/i-cache coherency if necessary.
*
* Not entirely sure why this is necessary on SH3 with 32K cache but
* without it we get occasional "Memory fault" when loading a program.
*/
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
__flush_purge_region(page_address(page), PAGE_SIZE);
}
Example 5: jffs2_commit_write
int jffs2_commit_write (struct file *filp, struct page *pg, unsigned start, unsigned end)
{
/* Actually commit the write from the page cache page we're looking at.
* For now, we write the full page out each time. It sucks, but it's simple
*/
struct inode *inode = pg->mapping->host;
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
struct jffs2_raw_inode *ri;
int ret = 0;
uint32_t writtenlen = 0;
D1(printk(KERN_DEBUG "jffs2_commit_write(): ino #%lu, page at 0x%lx, range %d-%d, flags %lx\n",
inode->i_ino, pg->index << PAGE_CACHE_SHIFT, start, end, pg->flags));
if (!start && end == PAGE_CACHE_SIZE) {
/* We need to avoid deadlock with page_cache_read() in
jffs2_garbage_collect_pass(). So we have to mark the
page up to date, to prevent page_cache_read() from
trying to re-lock it. */
SetPageUptodate(pg);
}
ri = jffs2_alloc_raw_inode();
if (!ri) {
D1(printk(KERN_DEBUG "jffs2_commit_write(): Allocation of raw inode failed\n"));
return -ENOMEM;
}
/* Set the fields that the generic jffs2_write_inode_range() code can't find */
ri->ino = cpu_to_je32(inode->i_ino);
ri->mode = cpu_to_jemode(inode->i_mode);
ri->uid = cpu_to_je16(inode->i_uid);
ri->gid = cpu_to_je16(inode->i_gid);
ri->isize = cpu_to_je32((uint32_t)inode->i_size);
ri->atime = ri->ctime = ri->mtime = cpu_to_je32(get_seconds());
/* In 2.4, it was already kmapped by generic_file_write(). Doesn't
hurt to do it again. The alternative is ifdefs, which are ugly. */
kmap(pg);
ret = jffs2_write_inode_range(c, f, ri, page_address(pg) + start,
(pg->index << PAGE_CACHE_SHIFT) + start,
end - start, &writtenlen);
kunmap(pg);
if (ret) {
/* There was an error writing. */
SetPageError(pg);
}
if (writtenlen) {
if (inode->i_size < (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen) {
inode->i_size = (pg->index << PAGE_CACHE_SHIFT) + start + writtenlen;
inode->i_blocks = (inode->i_size + 511) >> 9;
inode->i_ctime = inode->i_mtime = ITIME(je32_to_cpu(ri->ctime));
}
}
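Example 5 calls kmap(pg) before page_address(pg): for a highmem page-cache page, kmap() installs the mapping that page_address() then returns, and kunmap() releases it again. A hedged sketch of that pairing (the helper name is invented):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical helper: zero-fill a page-cache page that may live in highmem
 * by pinning a mapping with kmap() before dereferencing page_address().
 * Using the return value of kmap() directly would be equivalent. */
static void zero_pagecache_page(struct page *pg)
{
	kmap(pg);
	memset(page_address(pg), 0, PAGE_SIZE);
	kunmap(pg);
}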
Example 6: au_do_copy_file
static int au_do_copy_file(struct file *dst, struct file *src, loff_t len,
char *buf, unsigned long blksize)
{
int err;
size_t sz, rbytes, wbytes;
unsigned char all_zero;
char *p, *zp;
struct mutex *h_mtx;
/* reduce stack usage */
struct iattr *ia;
zp = page_address(ZERO_PAGE(0));
if (unlikely(!zp))
return -ENOMEM; /* possible? */
err = 0;
all_zero = 0;
while (len) {
AuDbg("len %lld\n", len);
sz = blksize;
if (len < blksize)
sz = len;
rbytes = 0;
/* todo: signal_pending? */
while (!rbytes || err == -EAGAIN || err == -EINTR) {
rbytes = vfsub_read_k(src, buf, sz, &src->f_pos);
err = rbytes;
}
if (unlikely(err < 0))
break;
all_zero = 0;
if (len >= rbytes && rbytes == blksize)
all_zero = !memcmp(buf, zp, rbytes);
if (!all_zero) {
wbytes = rbytes;
p = buf;
while (wbytes) {
size_t b;
b = vfsub_write_k(dst, p, wbytes, &dst->f_pos);
err = b;
/* todo: signal_pending? */
if (unlikely(err == -EAGAIN || err == -EINTR))
continue;
if (unlikely(err < 0))
break;
wbytes -= b;
p += b;
}
} else {
loff_t res;
AuLabel(hole);
res = vfsub_llseek(dst, rbytes, SEEK_CUR);
err = res;
if (unlikely(res < 0))
break;
}
len -= rbytes;
err = 0;
}
/* the last block may be a hole */
if (!err && all_zero) {
AuLabel(last hole);
err = 1;
if (au_test_nfs(dst->f_dentry->d_sb)) {
/* nfs requires this step to make last hole */
/* is this only nfs? */
do {
/* todo: signal_pending? */
err = vfsub_write_k(dst, "\0", 1, &dst->f_pos);
} while (err == -EAGAIN || err == -EINTR);
if (err == 1)
dst->f_pos--;
}
if (err == 1) {
ia = (void *)buf;
ia->ia_size = dst->f_pos;
ia->ia_valid = ATTR_SIZE | ATTR_FILE;
ia->ia_file = dst;
h_mtx = &dst->f_dentry->d_inode->i_mutex;
mutex_lock_nested(h_mtx, AuLsc_I_CHILD2);
err = vfsub_notify_change(&dst->f_path, ia);
mutex_unlock(h_mtx);
}
}
return err;
}
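Example 6 compares each block it reads against page_address(ZERO_PAGE(0)) so that all-zero blocks can become holes (a seek) instead of writes. A hedged, stand-alone version of that zero test (the helper name is made up; len is assumed to be at most PAGE_SIZE so one shared zero page suffices):

#include <linux/mm.h>
#include <linux/string.h>
#include <asm/pgtable.h>

/* Hypothetical helper: report whether a buffer of at most PAGE_SIZE bytes
 * is all zeroes by comparing it against the kernel's shared zero page. */
static int block_is_zero(const void *buf, size_t len)
{
	return len <= PAGE_SIZE &&
	       !memcmp(buf, page_address(ZERO_PAGE(0)), len);
}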
Example 7: intel_setup_irq_remapping
static int intel_setup_irq_remapping(struct intel_iommu *iommu)
{
struct ir_table *ir_table;
struct fwnode_handle *fn;
unsigned long *bitmap;
struct page *pages;
if (iommu->ir_table)
return 0;
ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
if (!ir_table)
return -ENOMEM;
pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
INTR_REMAP_PAGE_ORDER);
if (!pages) {
pr_err("IR%d: failed to allocate pages of order %d\n",
iommu->seq_id, INTR_REMAP_PAGE_ORDER);
goto out_free_table;
}
bitmap = bitmap_zalloc(INTR_REMAP_TABLE_ENTRIES, GFP_ATOMIC);
if (bitmap == NULL) {
pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
goto out_free_pages;
}
fn = irq_domain_alloc_named_id_fwnode("INTEL-IR", iommu->seq_id);
if (!fn)
goto out_free_bitmap;
iommu->ir_domain =
irq_domain_create_hierarchy(arch_get_ir_parent_domain(),
0, INTR_REMAP_TABLE_ENTRIES,
fn, &intel_ir_domain_ops,
iommu);
irq_domain_free_fwnode(fn);
if (!iommu->ir_domain) {
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
goto out_free_bitmap;
}
iommu->ir_msi_domain =
arch_create_remap_msi_irq_domain(iommu->ir_domain,
"INTEL-IR-MSI",
iommu->seq_id);
ir_table->base = page_address(pages);
ir_table->bitmap = bitmap;
iommu->ir_table = ir_table;
/*
* If the queued invalidation is already initialized,
* shouldn't disable it.
*/
if (!iommu->qi) {
/*
* Clear previous faults.
*/
dmar_fault(-1, iommu);
dmar_disable_qi(iommu);
if (dmar_enable_qi(iommu)) {
pr_err("Failed to enable queued invalidation\n");
goto out_free_bitmap;
}
}
init_ir_status(iommu);
if (ir_pre_enabled(iommu)) {
if (!is_kdump_kernel()) {
pr_warn("IRQ remapping was enabled on %s but we are not in kdump mode\n",
iommu->name);
clear_ir_pre_enabled(iommu);
iommu_disable_irq_remapping(iommu);
} else if (iommu_load_old_irte(iommu))
pr_err("Failed to copy IR table for %s from previous kernel\n",
iommu->name);
else
pr_info("Copied IR table for %s from previous kernel\n",
iommu->name);
}
iommu_set_irq_remapping(iommu, eim_mode);
return 0;
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
__free_pages(pages, INTR_REMAP_PAGE_ORDER);
out_free_table:
kfree(ir_table);
iommu->ir_table = NULL;
return -ENOMEM;
}
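Example 7 allocates its interrupt-remapping table as a block of 2^INTR_REMAP_PAGE_ORDER zeroed pages and only later converts it into a pointer with ir_table->base = page_address(pages). The allocation idiom on its own, reduced to a hedged sketch with an invented helper name:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical helper: allocate a physically contiguous, zeroed block of
 * 2^order pages on a given NUMA node and return its kernel mapping; the
 * caller would free it with __free_pages(virt_to_page(base), order). */
static void *alloc_node_table(int node, unsigned int order)
{
	struct page *pages = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order);

	return pages ? page_address(pages) : NULL;
}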
Example 8: eseqiv_givencrypt
static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
{
struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
struct ablkcipher_request *subreq;
crypto_completion_t complete;
void *data;
struct scatterlist *osrc, *odst;
struct scatterlist *dst;
struct page *srcp;
struct page *dstp;
u8 *giv;
u8 *vsrc;
u8 *vdst;
__be64 seq;
unsigned int ivsize;
unsigned int len;
int err;
subreq = (void *)(reqctx->tail + ctx->reqoff);
ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
giv = req->giv;
complete = req->creq.base.complete;
data = req->creq.base.data;
osrc = req->creq.src;
odst = req->creq.dst;
srcp = sg_page(osrc);
dstp = sg_page(odst);
vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + osrc->offset;
vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + odst->offset;
ivsize = crypto_ablkcipher_ivsize(geniv);
if (vsrc != giv + ivsize && vdst != giv + ivsize) {
giv = PTR_ALIGN((u8 *)reqctx->tail,
crypto_ablkcipher_alignmask(geniv) + 1);
complete = eseqiv_complete;
data = req;
}
ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
data);
sg_init_table(reqctx->src, 2);
sg_set_buf(reqctx->src, giv, ivsize);
eseqiv_chain(reqctx->src, osrc, vsrc == giv + ivsize);
dst = reqctx->src;
if (osrc != odst) {
sg_init_table(reqctx->dst, 2);
sg_set_buf(reqctx->dst, giv, ivsize);
eseqiv_chain(reqctx->dst, odst, vdst == giv + ivsize);
dst = reqctx->dst;
}
ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
req->creq.nbytes + ivsize,
req->creq.info);
memcpy(req->creq.info, ctx->salt, ivsize);
len = ivsize;
if (ivsize > sizeof(u64)) {
memset(req->giv, 0, ivsize - sizeof(u64));
len = sizeof(u64);
}
seq = cpu_to_be64(req->seq);
memcpy(req->giv + ivsize - len, &seq, len);
err = crypto_ablkcipher_encrypt(subreq);
if (err)
goto out;
if (giv != req->giv)
eseqiv_complete2(req);
out:
return err;
}
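Example 8 only calls page_address() when PageHighMem() is false, because a highmem page has no permanent kernel mapping and page_address() may return NULL for it. The general pattern, as a hedged sketch (the helper name is invented; a kmap() here must be paired with kunmap() by the caller):

#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical helper: obtain a CPU-usable mapping of a page, using the
 * permanent mapping when one exists and a temporary kmap() otherwise. */
static void *map_page_for_cpu(struct page *page)
{
	if (!PageHighMem(page))
		return page_address(page);

	return kmap(page);	/* caller pairs this with kunmap(page) */
}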
Example 9: xennet_alloc_rx_buffers
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
unsigned short id;
struct netfront_info *np = netdev_priv(dev);
struct sk_buff *skb;
struct page *page;
int i, batch_target, notify;
RING_IDX req_prod = np->rx.req_prod_pvt;
grant_ref_t ref;
unsigned long pfn;
void *vaddr;
struct xen_netif_rx_request *req;
if (unlikely(!netif_carrier_ok(dev)))
return;
/*
* Allocate skbuffs greedily, even though we batch updates to the
* receive ring. This creates a less bursty demand on the memory
* allocator, so should reduce the chance of failed allocation requests
* both for ourselves and for other kernel subsystems.
*/
batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto no_skb;
/* Align ip header to a 16-byte boundary */
skb_reserve(skb, NET_IP_ALIGN);
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page) {
kfree_skb(skb);
no_skb:
/* Any skbuffs queued for refill? Force them out. */
if (i != 0)
goto refill;
/* Could not allocate any skbuffs. Try again later. */
mod_timer(&np->rx_refill_timer,
jiffies + (HZ/10));
break;
}
__skb_fill_page_desc(skb, 0, page, 0, 0);
skb_shinfo(skb)->nr_frags = 1;
__skb_queue_tail(&np->rx_batch, skb);
}
/* Is the batch large enough to be worthwhile? */
if (i < (np->rx_target/2)) {
if (req_prod > np->rx.sring->req_prod)
goto push;
return;
}
/* Adjust our fill target if we risked running out of buffers. */
if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
((np->rx_target *= 2) > np->rx_max_target))
np->rx_target = np->rx_max_target;
refill:
for (i = 0; ; i++) {
skb = __skb_dequeue(&np->rx_batch);
if (skb == NULL)
break;
skb->dev = dev;
id = xennet_rxidx(req_prod + i);
BUG_ON(np->rx_skbs[id]);
np->rx_skbs[id] = skb;
ref = gnttab_claim_grant_reference(&np->gref_rx_head);
BUG_ON((signed short)ref < 0);
np->grant_rx_ref[id] = ref;
pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
req = RING_GET_REQUEST(&np->rx, req_prod + i);
gnttab_grant_foreign_access_ref(ref,
np->xbdev->otherend_id,
pfn_to_mfn(pfn),
0);
req->id = id;
req->gref = ref;
}
wmb(); /* barrier to ensure the backend sees the new requests */
np->rx.req_prod_pvt = req_prod + i;
push:
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
if (notify)
notify_remote_via_irq(np->netdev->irq);
//......... part of the code omitted .........
Example 10: iwl_tx_cmd_complete
/**
* iwl_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
* @rxb: Rx buffer to reclaim
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers)
*
* If an Rx buffer has an async callback associated with it the callback
* will be executed. The attached skb (if present) will only be freed
* if the callback returns 1
*/
void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb,
int handler_status)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
int txq_id = SEQ_TO_QUEUE(sequence);
int index = SEQ_TO_INDEX(sequence);
int cmd_index;
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
/* If a Tx command is being handled and it isn't in the actual
* command queue then a command routing bug has been introduced
* in the queue management code. */
if (WARN(txq_id != trans_pcie->cmd_queue,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
txq_id, trans_pcie->cmd_queue, sequence,
trans_pcie->txq[trans_pcie->cmd_queue].q.read_ptr,
trans_pcie->txq[trans_pcie->cmd_queue].q.write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
}
spin_lock(&txq->lock);
cmd_index = get_cmd_index(&txq->q, index);
cmd = txq->cmd[cmd_index];
meta = &txq->meta[cmd_index];
txq->time_stamp = jiffies;
iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
DMA_BIDIRECTIONAL);
/* Input error checking is done when commands are added to queue. */
if (meta->flags & CMD_WANT_SKB) {
struct page *p = rxb_steal_page(rxb);
meta->source->resp_pkt = pkt;
meta->source->_rx_page_addr = (unsigned long)page_address(p);
meta->source->_rx_page_order = hw_params(trans).rx_page_order;
meta->source->handler_status = handler_status;
}
iwl_hcmd_queue_reclaim(trans, txq_id, index);
if (!(meta->flags & CMD_ASYNC)) {
if (!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
IWL_WARN(trans,
"HCMD_ACTIVE already clear for command %s\n",
get_cmd_string(cmd->hdr.cmd));
}
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->hdr.cmd));
wake_up(&trans->wait_command_queue);
}
meta->flags = 0;
spin_unlock(&txq->lock);
}
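Example 10 stashes page_address(p) as an unsigned long so the command response page can be passed around as a plain address and recovered as a struct page later. For pages with a permanent mapping that round trip is exact, as this hedged sketch (invented helper name) shows:

#include <linux/mm.h>

/* Hypothetical check: for a page with a permanent kernel mapping,
 * virt_to_page() inverts page_address(). */
static int vaddr_roundtrip_ok(struct page *page)
{
	void *vaddr = page_address(page);

	return vaddr && virt_to_page(vaddr) == page;
}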
Example 11: async_syndrome_val
//......... part of the code omitted .........
}
if (!P(blocks, disks)) {
pq[0] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_P;
} else {
pq[0] = dma_map_page(dev, P(blocks, disks),
offset, len,
DMA_TO_DEVICE);
unmap->addr[j++] = pq[0];
unmap->to_cnt++;
}
if (!Q(blocks, disks)) {
pq[1] = 0;
dma_flags |= DMA_PREP_PQ_DISABLE_Q;
} else {
pq[1] = dma_map_page(dev, Q(blocks, disks),
offset, len,
DMA_TO_DEVICE);
unmap->addr[j++] = pq[1];
unmap->to_cnt++;
}
if (submit->flags & ASYNC_TX_FENCE)
dma_flags |= DMA_PREP_FENCE;
for (;;) {
tx = device->device_prep_dma_pq_val(chan, pq,
unmap->addr,
src_cnt,
coefs,
len, pqres,
dma_flags);
if (likely(tx))
break;
async_tx_quiesce(&submit->depend_tx);
dma_async_issue_pending(chan);
}
dma_set_unmap(tx, unmap);
async_tx_submit(chan, tx, submit);
return tx;
} else {
struct page *p_src = P(blocks, disks);
struct page *q_src = Q(blocks, disks);
enum async_tx_flags flags_orig = submit->flags;
dma_async_tx_callback cb_fn_orig = submit->cb_fn;
void *scribble = submit->scribble;
void *cb_param_orig = submit->cb_param;
void *p, *q, *s;
pr_debug("%s: (sync) disks: %d len: %zu\n",
__func__, disks, len);
/* caller must provide a temporary result buffer and
* allow the input parameters to be preserved
*/
BUG_ON(!spare || !scribble);
/* wait for any prerequisite operations */
async_tx_quiesce(&submit->depend_tx);
/* recompute p and/or q into the temporary buffer and then
* check to see the result matches the current value
*/
tx = NULL;
*pqres = 0;
if (p_src) {
init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
NULL, NULL, scribble);
tx = async_xor(spare, blocks, offset, disks-2, len, submit);
async_tx_quiesce(&tx);
p = page_address(p_src) + offset;
s = page_address(spare) + offset;
*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
}
if (q_src) {
P(blocks, disks) = NULL;
Q(blocks, disks) = spare;
init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
tx = async_gen_syndrome(blocks, offset, disks, len, submit);
async_tx_quiesce(&tx);
q = page_address(q_src) + offset;
s = page_address(spare) + offset;
*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
}
/* restore P, Q and submit */
P(blocks, disks) = p_src;
Q(blocks, disks) = q_src;
submit->cb_fn = cb_fn_orig;
submit->cb_param = cb_param_orig;
submit->flags = flags_orig;
async_tx_sync_epilog(submit);
return NULL;
}
}
Example 12: page_is_zero
static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
return !memchr_inv(page_address(p) + offset, 0, len);
}
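Example 12 wraps page_address() and memchr_inv() into a small predicate. A hedged usage sketch for it (the caller and the choice of ZERO_PAGE(0) are illustrative, not from the original driver):

#include <linux/mm.h>
#include <asm/pgtable.h>

/* Illustrative caller: the kernel's shared zero page should always be
 * reported as all zeroes by the page_is_zero() helper defined above. */
static int zero_page_sanity_check(void)
{
	return page_is_zero(ZERO_PAGE(0), 0, PAGE_SIZE);
}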
Example 13: reiserfs_xattr_set_handle
/*
* inode->i_mutex: down
*/
int
reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
struct inode *inode, const char *name,
const void *buffer, size_t buffer_size, int flags)
{
int err = 0;
struct dentry *dentry;
struct page *page;
char *data;
size_t file_pos = 0;
size_t buffer_pos = 0;
size_t new_size;
__u32 xahash = 0;
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
reiserfs_write_unlock(inode->i_sb);
if (!buffer) {
err = lookup_and_delete_xattr(inode, name);
reiserfs_write_lock(inode->i_sb);
return err;
}
dentry = xattr_lookup(inode, name, flags);
if (IS_ERR(dentry)) {
reiserfs_write_lock(inode->i_sb);
return PTR_ERR(dentry);
}
down_write(&REISERFS_I(inode)->i_xattr_sem);
reiserfs_write_lock(inode->i_sb);
xahash = xattr_hash(buffer, buffer_size);
while (buffer_pos < buffer_size || buffer_pos == 0) {
size_t chunk;
size_t skip = 0;
size_t page_offset = (file_pos & (PAGE_CACHE_SIZE - 1));
if (buffer_size - buffer_pos > PAGE_CACHE_SIZE)
chunk = PAGE_CACHE_SIZE;
else
chunk = buffer_size - buffer_pos;
page = reiserfs_get_page(dentry->d_inode, file_pos);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto out_unlock;
}
lock_page(page);
data = page_address(page);
if (file_pos == 0) {
struct reiserfs_xattr_header *rxh;
skip = file_pos = sizeof(struct reiserfs_xattr_header);
if (chunk + skip > PAGE_CACHE_SIZE)
chunk = PAGE_CACHE_SIZE - skip;
rxh = (struct reiserfs_xattr_header *)data;
rxh->h_magic = cpu_to_le32(REISERFS_XATTR_MAGIC);
rxh->h_hash = cpu_to_le32(xahash);
}
err = __reiserfs_write_begin(page, page_offset, chunk + skip);
if (!err) {
if (buffer)
memcpy(data + skip, buffer + buffer_pos, chunk);
err = reiserfs_commit_write(NULL, page, page_offset,
page_offset + chunk +
skip);
}
unlock_page(page);
reiserfs_put_page(page);
buffer_pos += chunk;
file_pos += chunk;
skip = 0;
if (err || buffer_size == 0 || !buffer)
break;
}
new_size = buffer_size + sizeof(struct reiserfs_xattr_header);
if (!err && new_size < i_size_read(dentry->d_inode)) {
struct iattr newattrs = {
.ia_ctime = current_fs_time(inode->i_sb),
.ia_size = new_size,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
reiserfs_write_unlock(inode->i_sb);
mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
inode_dio_wait(dentry->d_inode);
reiserfs_write_lock(inode->i_sb);
err = reiserfs_setattr(dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
} else
//......... part of the code omitted .........
Example 14: f2fs_setxattr
int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
const void *value, size_t value_len, struct page *ipage)
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_xattr_header *header = NULL;
struct f2fs_xattr_entry *here, *last;
struct page *page;
void *base_addr;
int error, found, free, newsize;
size_t name_len;
char *pval;
int ilock;
if (name == NULL)
return -EINVAL;
if (value == NULL)
value_len = 0;
name_len = strlen(name);
if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN)
return -ERANGE;
f2fs_balance_fs(sbi);
ilock = mutex_lock_op(sbi);
if (!fi->i_xattr_nid) {
/* Allocate new attribute block */
struct dnode_of_data dn;
if (!alloc_nid(sbi, &fi->i_xattr_nid)) {
error = -ENOSPC;
goto exit;
}
set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
mark_inode_dirty(inode);
page = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
if (IS_ERR(page)) {
alloc_nid_failed(sbi, fi->i_xattr_nid);
fi->i_xattr_nid = 0;
error = PTR_ERR(page);
goto exit;
}
alloc_nid_done(sbi, fi->i_xattr_nid);
base_addr = page_address(page);
header = XATTR_HDR(base_addr);
header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
header->h_refcount = cpu_to_le32(1);
} else {
/* The inode already has an extended attribute block. */
page = get_node_page(sbi, fi->i_xattr_nid);
if (IS_ERR(page)) {
error = PTR_ERR(page);
goto exit;
}
base_addr = page_address(page);
header = XATTR_HDR(base_addr);
}
if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
error = -EIO;
goto cleanup;
}
/* find entry with wanted name. */
found = 0;
list_for_each_xattr(here, base_addr) {
if (here->e_name_index != name_index)
continue;
if (here->e_name_len != name_len)
continue;
if (!memcmp(here->e_name, name, name_len)) {
found = 1;
break;
}
}
last = here;
while (!IS_XATTR_LAST_ENTRY(last))
last = XATTR_NEXT_ENTRY(last);
newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) +
name_len + value_len);
/* 1. Check space */
if (value) {
/* If value is NULL, this is a remove operation.
* In case of an update operation, we calculate the free space.
*/
free = MIN_OFFSET - ((char *)last - (char *)header);
if (found)
free = free - ENTRY_SIZE(here);
//......... part of the code omitted .........
Example 15: reiserfs_xattr_get
/*
* inode->i_mutex: down
*/
int
reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
size_t buffer_size)
{
ssize_t err = 0;
struct dentry *dentry;
size_t isize;
size_t file_pos = 0;
size_t buffer_pos = 0;
struct page *page;
__u32 hash = 0;
if (name == NULL)
return -EINVAL;
/* We can't have xattrs attached to v1 items since they don't have
* generation numbers */
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
dentry = xattr_lookup(inode, name, XATTR_REPLACE);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
goto out;
}
down_read(&REISERFS_I(inode)->i_xattr_sem);
isize = i_size_read(dentry->d_inode);
/* Just return the size needed */
if (buffer == NULL) {
err = isize - sizeof(struct reiserfs_xattr_header);
goto out_unlock;
}
if (buffer_size < isize - sizeof(struct reiserfs_xattr_header)) {
err = -ERANGE;
goto out_unlock;
}
while (file_pos < isize) {
size_t chunk;
char *data;
size_t skip = 0;
if (isize - file_pos > PAGE_CACHE_SIZE)
chunk = PAGE_CACHE_SIZE;
else
chunk = isize - file_pos;
page = reiserfs_get_page(dentry->d_inode, file_pos);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto out_unlock;
}
lock_page(page);
data = page_address(page);
if (file_pos == 0) {
struct reiserfs_xattr_header *rxh =
(struct reiserfs_xattr_header *)data;
skip = file_pos = sizeof(struct reiserfs_xattr_header);
chunk -= skip;
/* Magic doesn't match up.. */
if (rxh->h_magic != cpu_to_le32(REISERFS_XATTR_MAGIC)) {
unlock_page(page);
reiserfs_put_page(page);
reiserfs_warning(inode->i_sb, "jdm-20001",
"Invalid magic for xattr (%s) "
"associated with %k", name,
INODE_PKEY(inode));
err = -EIO;
goto out_unlock;
}
hash = le32_to_cpu(rxh->h_hash);
}
memcpy(buffer + buffer_pos, data + skip, chunk);
unlock_page(page);
reiserfs_put_page(page);
file_pos += chunk;
buffer_pos += chunk;
skip = 0;
}
err = isize - sizeof(struct reiserfs_xattr_header);
if (xattr_hash(buffer, isize - sizeof(struct reiserfs_xattr_header)) !=
hash) {
reiserfs_warning(inode->i_sb, "jdm-20002",
"Invalid hash for xattr (%s) associated "
"with %k", name, INODE_PKEY(inode));
err = -EIO;
}
out_unlock:
up_read(&REISERFS_I(inode)->i_xattr_sem);
dput(dentry);
//......... part of the code omitted .........
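Examples 13 and 15 follow the same pattern: look up a page-cache page, lock it, access its contents through page_address(), then unlock and release it. A hedged, reduced sketch of the read side (the helper name is invented; it assumes the page is a lowmem page with a permanent kernel mapping, which is what the reiserfs helpers arrange for these xattr pages):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>

/* Hypothetical helper: copy len bytes starting at offset off out of a
 * page-cache page while holding the page lock. */
static void read_locked_page(struct page *page, void *dst, size_t off, size_t len)
{
	lock_page(page);
	memcpy(dst, page_address(page) + off, len);
	unlock_page(page);
}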