本文整理汇总了C++中crypto_tfm_alg_blocksize函数的典型用法代码示例。如果您正苦于以下问题:C++ crypto_tfm_alg_blocksize函数的具体用法?C++ crypto_tfm_alg_blocksize怎么用?C++ crypto_tfm_alg_blocksize使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了crypto_tfm_alg_blocksize函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: crypto_hmac_final
/*
 * Finish an HMAC computation: out = H((K ^ opad) || inner_hash).
 *
 * 'out' must already be large enough for the digest; on entry it is
 * overwritten with the inner hash, then replaced by the final HMAC.
 * *keylen is updated in place if the key had to be shortened.
 */
void crypto_hmac_final(struct crypto_tfm *tfm, u8 *key,
		       unsigned int *keylen, u8 *out)
{
	struct scatterlist sg;
	char *opad = tfm->crt_digest.dit_hmac_block;
	const unsigned int bs = crypto_tfm_alg_blocksize(tfm);
	unsigned int idx;

	/* Keys longer than one block are replaced by their digest. */
	if (*keylen > bs) {
		hash_key(tfm, key, *keylen);
		*keylen = crypto_tfm_alg_digestsize(tfm);
	}

	/* Produce the inner hash into 'out'. */
	crypto_digest_final(tfm, out);

	/* Build K ^ 0x5c in the preallocated block buffer. */
	memset(opad, 0, bs);
	memcpy(opad, key, *keylen);
	for (idx = 0; idx < bs; idx++)
		opad[idx] ^= 0x5c;

	/* Outer hash: H(opad-block || inner hash), written back to 'out'. */
	sg.page = virt_to_page(opad);
	sg.offset = offset_in_page(opad);
	sg.length = bs;
	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &sg, 1);

	sg.page = virt_to_page(out);
	sg.offset = offset_in_page(out);
	sg.length = crypto_tfm_alg_digestsize(tfm);
	crypto_digest_update(tfm, &sg, 1);
	crypto_digest_final(tfm, out);
}
示例2: crypto_cbc_init_tfm
/*
 * Initialize a CBC template instance: pick a block-XOR helper sized to
 * the underlying cipher and instantiate the child cipher transform.
 *
 * Returns 0 on success or the PTR_ERR of the failed spawn.
 */
static int crypto_cbc_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = (void *)tfm->__crt_alg;
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct crypto_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_tfm *child;
	const unsigned int bs = crypto_tfm_alg_blocksize(tfm);

	/* Fast paths for the two common block sizes, generic fallbacks
	 * (word-wise when 4-byte aligned, byte-wise otherwise). */
	if (bs == 8)
		ctx->xor = xor_64;
	else if (bs == 16)
		ctx->xor = xor_128;
	else if (bs % 4)
		ctx->xor = xor_byte;
	else
		ctx->xor = xor_quad;

	child = crypto_spawn_tfm(spawn);
	if (IS_ERR(child))
		return PTR_ERR(child);

	ctx->child = crypto_cipher_cast(child);
	return 0;
}
示例3: crypto_xcbc_init
/*
 * Reset the running XCBC-MAC state (length counter, previous block,
 * chaining IV) and rekey via _crypto_xcbc_init().
 *
 * Returns whatever _crypto_xcbc_init() returns (0 or -errno).
 */
int crypto_xcbc_init(struct crypto_tfm *tfm, u8 *key, unsigned int keylen)
{
	struct xcbc_ops *ops = (struct xcbc_ops *)tfm->crt_cipher.cit_xcbc_block;
	const unsigned int bs = crypto_tfm_alg_blocksize(tfm);

	ops->len = 0;
	memset(ops->prev, 0, bs);
	memset(tfm->crt_cipher.cit_iv, 0, bs);

	return _crypto_xcbc_init(tfm, key, keylen);
}
示例4: crypto_alloc_hmac_block
/*
 * Allocate the one-block scratch buffer used by the HMAC helpers
 * (crypto_hmac_init/final) and attach it to the tfm.
 *
 * Returns 0 on success or -ENOMEM if the allocation fails.
 */
int crypto_alloc_hmac_block(struct crypto_tfm *tfm)
{
	/* A zero block size would make the buffer useless. */
	BUG_ON(!crypto_tfm_alg_blocksize(tfm));

	tfm->crt_digest.dit_hmac_block =
		kmalloc(crypto_tfm_alg_blocksize(tfm), GFP_KERNEL);
	if (tfm->crt_digest.dit_hmac_block == NULL)
		return -ENOMEM;

	return 0;
}
示例5: qce_ahash_export
/*
 * Export the in-progress hash state (count, digest words, partial
 * block) into the caller-provided sha1_state/sha256_state buffer.
 *
 * Returns 0 on success or -EINVAL for an unsupported algorithm flag.
 */
static int qce_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	unsigned long flags = rctx->flags;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
		struct sha1_state *state = out;

		state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)state->state,
				       rctx->digest, digestsize);
		memcpy(state->buffer, rctx->buf, blocksize);
		return 0;
	}

	if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
		struct sha256_state *state = out;

		state->count = rctx->count;
		qce_cpu_to_be32p_array((__be32 *)state->state,
				       rctx->digest, digestsize);
		memcpy(state->buf, rctx->buf, blocksize);
		return 0;
	}

	return -EINVAL;
}
示例6: qce_import_common
/*
 * qce_import_common() - restore a previously exported hash state into
 * the request context so hashing can resume on the QCE hardware.
 * @req:      ahash request whose reqctx is being repopulated
 * @in_count: total bytes hashed so far, from the exported state
 * @state:    saved intermediate digest words
 * @buffer:   saved partial-block buffer (assumed one block in size —
 *            TODO confirm against the exporting side)
 * @hmac:     true if the transform is an HMAC variant
 *
 * Always returns 0.
 */
static int qce_import_common(struct ahash_request *req, u64 in_count,
const u32 *state, const u8 *buffer, bool hmac)
{
struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
unsigned int digestsize = crypto_ahash_digestsize(ahash);
unsigned int blocksize;
u64 count = in_count;
blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
rctx->count = in_count;
memcpy(rctx->buf, buffer, blocksize);
/* first_blk tells the hardware whether this resumes at the very
 * first block or mid-stream. */
if (in_count <= blocksize) {
rctx->first_blk = 1;
} else {
rctx->first_blk = 0;
/*
 * For HMAC, there is a hardware padding done when first block
 * is set. Therefore the byte_count must be incremented by 64
 * after the first block operation.
 */
if (hmac)
count += SHA_PADDING;
}
/* Split the 64-bit running count across two 32-bit registers; the
 * low word is rounded down to a SHA_PADDING boundary. */
rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
rctx->byte_count[1] = (__force __be32)(count >> 32);
qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
digestsize);
/* Valid bytes in rctx->buf: the partial-block remainder.
 * NOTE(review): mask arithmetic assumes blocksize is a power of two. */
rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
return 0;
}
示例7: s390_sha_update
/*
 * s390_sha_update() - feed data into an s390 hardware-assisted SHA
 * transform.
 *
 * Whole blocks are passed straight to the crypt_s390_kimd() primitive;
 * any partial block is buffered in ctx->buf until enough data arrives.
 * ctx->count tracks the total number of bytes seen.
 *
 * NOTE(review): the mask arithmetic assumes bsize is a power of two.
 */
void s390_sha_update(struct crypto_tfm *tfm, const u8 *data, unsigned int len)
{
struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
unsigned int index;
int ret;
/* how much is already in the buffer? */
index = ctx->count & (bsize - 1);
ctx->count += len;
/* not enough for a full block yet: just stash the bytes */
if ((index + len) < bsize)
goto store;
/* process one stored block */
if (index) {
memcpy(ctx->buf + index, data, bsize - index);
ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize);
BUG_ON(ret != bsize);
data += bsize - index;
len -= bsize - index;
}
/* process as many blocks as possible */
if (len >= bsize) {
ret = crypt_s390_kimd(ctx->func, ctx->state, data,
len & ~(bsize - 1));
BUG_ON(ret != (len & ~(bsize - 1)));
data += ret;
len -= ret;
}
store:
/* keep the remainder for the next update/final call */
if (len)
memcpy(ctx->buf + index , data, len);
}
示例8: crypto_alloc_xcbc_block
/*
 * Allocate the per-tfm XCBC state: a struct xcbc_ops header followed,
 * in the same allocation, by one cipher block of 'prev' storage.
 *
 * XCBC is only set up for 16-byte block ciphers; any other block size
 * is a successful no-op and cit_xcbc_block is left untouched.
 *
 * Returns 0 on success or -ENOMEM if the allocation fails.
 *
 * Fixes vs. original: stray "+ +" (double plus) typo in the kmalloc
 * size expression removed; unnecessary cast of kmalloc's return value
 * dropped (kmalloc returns void *).
 */
int crypto_alloc_xcbc_block(struct crypto_tfm *tfm)
{
	struct xcbc_ops *ops;

	BUG_ON(!crypto_tfm_alg_blocksize(tfm));

	if (crypto_tfm_alg_blocksize(tfm) != 16)
		return 0;

	/* Single allocation: header plus trailing 'prev' block. */
	ops = kmalloc(sizeof(*ops) + crypto_tfm_alg_blocksize(tfm),
		      GFP_KERNEL);
	if (ops == NULL)
		return -ENOMEM;

	ops->len = 0;
	ops->prev = (u8 *)(ops + 1);
	tfm->crt_cipher.cit_xcbc_block = ops;
	return 0;
}
示例9: _crypto_xcbc_init
/*
 * _crypto_xcbc_init() - derive XCBC subkey K1 and install it in the
 * cipher.
 *
 * K1 = E_K(k1), where 'k1' is a constant defined elsewhere in this
 * file.  The cipher is first keyed with the user key to compute K1,
 * then re-keyed with K1 for the MAC computation proper.
 *
 * Returns 0 on success, -EINVAL for a non-CBC transform or a key whose
 * length is not exactly one cipher block, or the setkey error code.
 */
static int _crypto_xcbc_init(struct crypto_tfm *tfm, u8 *key, unsigned int keylen)
{
const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
u8 key1[bsize];
int err;
/* XCBC is only defined on top of CBC-mode ciphers here. */
if (!(tfm->crt_cipher.cit_mode & CRYPTO_TFM_MODE_CBC))
return -EINVAL;
if (keylen != crypto_tfm_alg_blocksize(tfm))
return -EINVAL;
if ((err = crypto_cipher_setkey(tfm, key, keylen)))
return err;
/* K1 = E_K(k1 constant); cia_encrypt is called directly to bypass
 * the CBC chaining. */
tfm->__crt_alg->cra_cipher.cia_encrypt(crypto_tfm_ctx(tfm), key1, (const u8*)k1);
return crypto_cipher_setkey(tfm, key1, bsize);
}
示例10: mv_cesa_ahmac_setkey
/*
 * Compute the HMAC inner/outer IV states for the CESA engine.
 *
 * A software ahash named by @hash_alg_name is instantiated, the key is
 * expanded into ipad/opad blocks, and the intermediate states after
 * hashing each pad are written to @istate and @ostate.
 *
 * Returns 0 on success or a negative errno.
 */
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	unsigned int blocksize;
	u8 *ipad;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, CRYPTO_ALG_TYPE_AHASH,
				 CRYPTO_ALG_TYPE_AHASH_MASK);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto err_free_hash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	/* ipad and opad share a single zeroed allocation: [ipad|opad]. */
	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	ipad = kzalloc(2 * blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto err_free_req;
	}
	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (!ret) {
		ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate,
						  blocksize);
		if (!ret)
			ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate,
							  blocksize);
	}

	kfree(ipad);
err_free_req:
	ahash_request_free(req);
err_free_hash:
	crypto_free_ahash(tfm);
	return ret;
}
示例11: crypto_hmac_init
/*
 * Begin an HMAC computation: start the inner hash with K ^ ipad.
 *
 * Keys longer than one block are first replaced by their digest and
 * *keylen is updated in place, per the HMAC construction.
 */
void crypto_hmac_init(struct crypto_tfm *tfm, u8 *key, unsigned int *keylen)
{
	struct scatterlist sg;
	char *ipad = tfm->crt_digest.dit_hmac_block;
	const unsigned int bs = crypto_tfm_alg_blocksize(tfm);
	unsigned int idx;

	/* Oversized keys are hashed down to digest size. */
	if (*keylen > bs) {
		hash_key(tfm, key, *keylen);
		*keylen = crypto_tfm_alg_digestsize(tfm);
	}

	/* Build K ^ 0x36 in the preallocated block buffer. */
	memset(ipad, 0, bs);
	memcpy(ipad, key, *keylen);
	for (idx = 0; idx < bs; idx++)
		ipad[idx] ^= 0x36;

	/* Feed the padded key block as the first piece of the inner hash. */
	sg_set_buf(&sg, ipad, bs);
	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &sg, 1);
}
示例12: cipher_crypt_unaligned
/*
 * Run a single-block cipher operation when src/dst do not meet the
 * algorithm's alignment mask: bounce through an aligned stack buffer.
 */
static void cipher_crypt_unaligned(void (*fn)(struct crypto_tfm *, u8 *,
					      const u8 *),
				   struct crypto_tfm *tfm,
				   u8 *dst, const u8 *src)
{
	unsigned long alignmask = crypto_tfm_alg_alignmask(tfm);
	unsigned int bs = crypto_tfm_alg_blocksize(tfm);
	u8 scratch[bs + alignmask];
	u8 *aligned = (u8 *)ALIGN((unsigned long)scratch, alignmask + 1);

	/* Copy in, encrypt/decrypt in place, copy back out. */
	memcpy(aligned, src, bs);
	fn(tfm, aligned, aligned);
	memcpy(dst, aligned, bs);
}
示例13: crypt
/*
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks. In user context,
 * the kernel is given a chance to schedule us once per block.
 *
 * Returns 0 on success or -EINVAL if nbytes is not a multiple of the
 * cipher block size (CRYPTO_TFM_RES_BAD_BLOCK_LEN is also set).
 */
static int crypt(struct crypto_tfm *tfm,
struct scatterlist *dst,
struct scatterlist *src,
unsigned int nbytes, cryptfn_t crfn,
procfn_t prfn, int enc, void *info)
{
struct scatter_walk walk_in, walk_out;
const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
/* Stack bounce buffers used when a block straddles a page boundary. */
u8 tmp_src[bsize];
u8 tmp_dst[bsize];
if (!nbytes)
return 0;
/* Only whole blocks are accepted by this block-cipher path. */
if (nbytes % bsize) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
return -EINVAL;
}
scatterwalk_start(&walk_in, src);
scatterwalk_start(&walk_out, dst);
for(;;) {
u8 *src_p, *dst_p;
int in_place;
scatterwalk_map(&walk_in);
scatterwalk_map(&walk_out);
/* whichbuf returns the mapped page directly when the block is
 * contiguous, or the tmp buffer when it spans pages. */
src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
/* in_place tells the mode-processing callback that src and
 * dst alias, so it may need its own scratch space. */
in_place = scatterwalk_samebuf(&walk_in, &walk_out,
src_p, dst_p);
nbytes -= bsize;
scatterwalk_copychunks(src_p, &walk_in, bsize, 0);
prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
scatterwalk_done(&walk_in, nbytes);
/* Flush the processed block back out through the walk. */
scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
scatterwalk_done(&walk_out, nbytes);
if (!nbytes)
return 0;
/* Give the scheduler a chance between blocks (user context). */
crypto_yield(tfm);
}
}
示例14: crypto_xcbc_final
/*
 * crypto_xcbc_final() - finish an XCBC-MAC and emit the tag.
 *
 * Per the XCBC construction the last block is handled two ways:
 *  - a full final block is XORed with subkey K2 (derived from the 'k2'
 *    constant defined elsewhere in this file);
 *  - a partial final block is 10*-padded and XORed with subkey K3
 *    (derived from the 'k3' constant).
 * In both cases the result is folded into the CBC IV and encrypted to
 * produce the MAC in 'out'.
 *
 * Returns 0 on success, -EINVAL for non-CBC transforms or a key of the
 * wrong length, or a setkey error code.
 */
int crypto_xcbc_final(struct crypto_tfm *tfm, u8 *key, unsigned int keylen, u8 *out)
{
struct xcbc_ops *ops = (struct xcbc_ops*)tfm->crt_cipher.cit_xcbc_block;
const unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
int ret = 0;
if (!(tfm->crt_cipher.cit_mode & CRYPTO_TFM_MODE_CBC))
return -EINVAL;
if (keylen != bsize)
return -EINVAL;
/* Exactly one full block pending: the K2 path. */
if (ops->len == bsize) {
u8 key2[bsize];
if ((ret = crypto_cipher_setkey(tfm, key, keylen)))
return ret;
/* K2 = E_K(k2 constant) */
tfm->__crt_alg->cra_cipher.cia_encrypt(crypto_tfm_ctx(tfm), key2, (const u8*)k2);
tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, ops->prev);
tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, key2);
/* Re-install K1 before the final encryption. */
_crypto_xcbc_init(tfm, key, keylen);
tfm->__crt_alg->cra_cipher.cia_encrypt(crypto_tfm_ctx(tfm), out, tfm->crt_cipher.cit_iv);
} else {
u8 key3[bsize];
unsigned int rlen;
u8 *p = ops->prev + ops->len;
/* 10* padding: a single 0x80 byte then zeros to block end. */
*p = 0x80;
p++;
rlen = bsize - ops->len -1;
if (rlen)
memset(p, 0, rlen);
if ((ret = crypto_cipher_setkey(tfm, key, keylen)))
return ret;
/* K3 = E_K(k3 constant) */
tfm->__crt_alg->cra_cipher.cia_encrypt(crypto_tfm_ctx(tfm), key3, (const u8*)k3);
tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, ops->prev);
tfm->crt_u.cipher.cit_xor_block(tfm->crt_cipher.cit_iv, key3);
/* Re-install K1 before the final encryption. */
_crypto_xcbc_init(tfm, key, keylen);
tfm->__crt_alg->cra_cipher.cia_encrypt(crypto_tfm_ctx(tfm), out, tfm->crt_cipher.cit_iv);
}
return ret;
}
示例15: cbc_process
/*
 * cbc_process() - one block of CBC chaining around a raw cipher call.
 *
 * 'info' carries the chaining IV; a NULL IV means null encryption and
 * the block is skipped entirely.  For decryption with src == dst, a
 * stack buffer preserves the ciphertext (needed as the next IV) until
 * the plaintext can be written out.
 *
 * NOTE(review): the VLA is declared with length 0 when !in_place;
 * zero-length arrays are a compiler extension, not standard C.
 */
static void cbc_process(struct crypto_tfm *tfm, u8 *dst, u8 *src,
cryptfn_t fn, int enc, void *info, int in_place)
{
u8 *iv = info;
/* Null encryption */
if (!iv)
return;
if (enc) {
/* Encrypt: C = E(P ^ IV); ciphertext becomes the next IV. */
tfm->crt_u.cipher.cit_xor_block(iv, src);
fn(crypto_tfm_ctx(tfm), dst, iv);
memcpy(iv, dst, crypto_tfm_alg_blocksize(tfm));
} else {
/* Decrypt: P = D(C) ^ IV; the incoming ciphertext (src) must
 * survive as the next IV, hence the bounce buffer when
 * operating in place. */
u8 stack[in_place ? crypto_tfm_alg_blocksize(tfm) : 0];
u8 *buf = in_place ? stack : dst;
fn(crypto_tfm_ctx(tfm), buf, src);
tfm->crt_u.cipher.cit_xor_block(buf, iv);
memcpy(iv, src, crypto_tfm_alg_blocksize(tfm));
if (buf != dst)
memcpy(dst, buf, crypto_tfm_alg_blocksize(tfm));
}
}