This article collects typical usage examples of the C++ LWIP_MEM_ALIGN_SIZE function (in the lwIP sources it is actually a preprocessor macro). If you have been wondering what LWIP_MEM_ALIGN_SIZE is for, how to call it, or simply want to see it used in real code, the curated examples below should help.
Fifteen code examples of LWIP_MEM_ALIGN_SIZE are shown, ordered by popularity by default.
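For reference, LWIP_MEM_ALIGN_SIZE simply rounds a byte count up to the next multiple of MEM_ALIGNMENT. The stock lwIP default looks roughly like the sketch below; treat it as an illustration only, since a port's cc.h may override the macro:
#ifndef LWIP_MEM_ALIGN_SIZE
#define LWIP_MEM_ALIGN_SIZE(size) (((size) + MEM_ALIGNMENT - 1U) & ~(MEM_ALIGNMENT - 1U))
#endif
/* e.g. with MEM_ALIGNMENT == 4: LWIP_MEM_ALIGN_SIZE(13) == 16, LWIP_MEM_ALIGN_SIZE(16) == 16 */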
Example 1: mem_malloc
/**
* Allocate memory: determine the smallest pool that is big enough
* to contain an element of 'size' and get an element from that pool.
*
* @param size the size in bytes of the memory needed
* @return a pointer to the allocated memory or NULL if the pool is empty
*/
void *
mem_malloc(mem_size_t size)
{
void *ret;
struct memp_malloc_helper *element;
memp_t poolnr;
mem_size_t required_size = size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST; poolnr = (memp_t)(poolnr + 1)) {
#if MEM_USE_POOLS_TRY_BIGGER_POOL
again:
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
/* is this pool big enough to hold an element of the required size
plus a struct memp_malloc_helper that saves the pool this element came from? */
if (required_size <= memp_pools[poolnr]->size) {
break;
}
}
if (poolnr > MEMP_POOL_LAST) {
LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
MEM_STATS_INC(err);
return NULL;
}
element = (struct memp_malloc_helper*)memp_malloc(poolnr);
if (element == NULL) {
/* No need to DEBUGF or ASSERT: This error is already
taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
/** Try a bigger pool if this one is empty! */
if (poolnr < MEMP_POOL_LAST) {
poolnr++;
goto again;
}
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
MEM_STATS_INC(err);
return NULL;
}
/* save the pool number this element came from */
element->poolnr = poolnr;
/* and return a pointer to the memory directly after the struct memp_malloc_helper */
ret = (u8_t*)element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
#if MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS)
/* truncating to u16_t is safe because struct memp_desc::size is u16_t */
element->size = (u16_t)size;
MEM_STATS_INC_USED(used, element->size);
#endif /* MEMP_OVERFLOW_CHECK || (LWIP_STATS && MEM_STATS) */
#if MEMP_OVERFLOW_CHECK
/* initialize unused memory (diff between requested size and selected pool's size) */
memset((u8_t*)ret + size, 0xcd, memp_pools[poolnr]->size - size);
#endif /* MEMP_OVERFLOW_CHECK */
return ret;
}
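A minimal usage sketch of this pooled variant (MEM_USE_POOLS enabled); the size and the pointer arithmetic follow directly from the code above:
void *buf = mem_malloc(120);   /* served from the smallest MEMP_POOL_* whose size >= 120 + aligned helper */
if (buf != NULL) {
    /* buf points LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)) bytes past the pool element,
     * so mem_free() can step back to the helper and read element->poolnr */
    mem_free(buf);
}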
Example 2: pbuf_alloced_custom
/**
* @ingroup pbuf
* Initialize a custom pbuf (already allocated).
*
* @param l flag to define header size
* @param length size of the pbuf's payload
* @param type type of the pbuf (only used to treat the pbuf accordingly, as
* this function allocates no memory)
* @param p pointer to the custom pbuf to initialize (already allocated)
* @param payload_mem pointer to the buffer that is used for payload and headers,
* must be at least big enough to hold 'length' plus the header size,
* may be NULL if set later.
* ATTENTION: The caller is responsible for correct alignment of this buffer!!
* @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
* big enough to hold 'length' plus the header size
*/
struct pbuf*
pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
void *payload_mem, u16_t payload_mem_len)
{
u16_t offset;
LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
/* determine header offset */
switch (l) {
case PBUF_TRANSPORT:
/* add room for transport (often TCP) layer header */
offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
break;
case PBUF_IP:
/* add room for IP layer header */
offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
break;
case PBUF_LINK:
/* add room for link layer header */
offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
break;
case PBUF_RAW_TX:
/* add room for encapsulating link layer headers (e.g. 802.11) */
offset = PBUF_LINK_ENCAPSULATION_HLEN;
break;
case PBUF_RAW:
offset = 0;
break;
default:
LWIP_ASSERT("pbuf_alloced_custom: bad pbuf layer", 0);
return NULL;
}
if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
return NULL;
}
p->pbuf.next = NULL;
if (payload_mem != NULL) {
p->pbuf.payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
} else {
p->pbuf.payload = NULL;
}
p->pbuf.flags = PBUF_FLAG_IS_CUSTOM;
p->pbuf.len = p->pbuf.tot_len = length;
p->pbuf.type = type;
p->pbuf.ref = 1;
return &p->pbuf;
}
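A hedged zero-copy RX sketch built on this function; the slot type, the buffer size and the driver hook names (my_rx_slot, my_rx_release, my_rx_to_pbuf) are illustrative, not part of lwIP:
typedef struct my_rx_slot {
    struct pbuf_custom pc;   /* must come first so the pbuf* can be cast back to the slot */
    u8_t buf[1536];          /* the caller is responsible for aligning this buffer */
} my_rx_slot_t;

static void my_rx_release(struct pbuf *p)
{
    my_rx_slot_t *slot = (my_rx_slot_t *)p;
    /* return 'slot' to the driver's RX ring here */
    (void)slot;
}

static struct pbuf *my_rx_to_pbuf(my_rx_slot_t *slot, u16_t frame_len)
{
    slot->pc.custom_free_function = my_rx_release;  /* invoked when the last pbuf reference is freed */
    return pbuf_alloced_custom(PBUF_RAW, frame_len, PBUF_REF,
                               &slot->pc, slot->buf, sizeof(slot->buf));
}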
Example 3: pbuf_alloc
struct pbuf *
pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
{
struct pbuf *p;
u16_t offset = 0;
offset += 16;
/* If pbuf is to be allocated in RAM, allocate memory for it. */
p = (struct pbuf*)rt_malloc(LWIP_MEM_ALIGN_SIZE(SIZEOF_STRUCT_PBUF + offset) + LWIP_MEM_ALIGN_SIZE(length));
if (p == RT_NULL) return RT_NULL;
/* Set up internal structure of the pbuf. */
p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + SIZEOF_STRUCT_PBUF + offset));
p->len = length;
return p;
}
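In this simplified RT-Thread style variant the whole pbuf is one contiguous rt_malloc() block: an aligned header region (struct pbuf plus a fixed 16-byte offset) followed by the aligned payload. A worked sketch, assuming MEM_ALIGNMENT == 4 and SIZEOF_STRUCT_PBUF == 16 (both values are port-dependent):
/* request:   pbuf_alloc(layer, 1500, type)
 * header:    LWIP_MEM_ALIGN_SIZE(16 + 16) = 32    (struct pbuf + fixed 16-byte offset)
 * payload:   LWIP_MEM_ALIGN_SIZE(1500)    = 1500
 * rt_malloc: 32 + 1500                    = 1532 bytes; p->payload starts at (u8_t*)p + 32 */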
Example 4: mem_free
/**
* Free memory previously allocated by mem_malloc. Loads the pool number
* and calls memp_free with that pool number to put the element back into
* its pool
*
* @param rmem the memory element to free
*/
void
mem_free(void *rmem)
{
struct memp_malloc_helper *hmem;
LWIP_ASSERT("rmem != NULL", (rmem != NULL));
LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
/* get the original struct memp_malloc_helper */
/* cast through void* to get rid of alignment warnings */
hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));
LWIP_ASSERT("hmem != NULL", (hmem != NULL));
LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
MEM_STATS_DEC_USED(used, hmem->size);
#if MEMP_OVERFLOW_CHECK
{
u16_t i;
LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
hmem->size <= memp_pools[hmem->poolnr]->size);
/* check that unused memory remained untouched (diff between requested size and selected pool's size) */
for (i = hmem->size; i < memp_pools[hmem->poolnr]->size; i++) {
u8_t data = *((u8_t*)rmem + i);
LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
}
}
#endif /* MEMP_OVERFLOW_CHECK */
/* and put it in the pool we saved earlier */
memp_free(hmem->poolnr, hmem);
}
Example 5: mem_free
/**
* Free memory previously allocated by mem_malloc. Loads the pool number
* and calls memp_free with that pool number to put the element back into
* its pool
*
* @param rmem the memory element to free
*/
void
mem_free(void *rmem)
{
struct memp_malloc_helper *hmem;
LWIP_ASSERT("rmem != NULL", (rmem != NULL));
LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
/* get the original struct memp_malloc_helper */
hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));
LWIP_ASSERT("hmem != NULL", (hmem != NULL));
LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
#if MEMP_OVERFLOW_CHECK
{
u16_t i;
LWIP_ASSERT("MEM_USE_POOLS: invalid chunk size",
hmem->size <= memp_sizes[hmem->poolnr]);
/* check that unused memory remained untouched */
for (i = hmem->size; i < memp_sizes[hmem->poolnr]; i++) {
u8_t data = *((u8_t*)rmem + i);
LWIP_ASSERT("MEM_USE_POOLS: mem overflow detected", data == 0xcd);
}
}
#endif /* MEMP_OVERFLOW_CHECK */
/* and put it in the pool we saved earlier */
memp_free(hmem->poolnr, hmem);
}
Example 6: LWIP_MEM_ALIGN_SIZE
/**
* Allocate memory: determine the smallest pool that is big enough
* to contain an element of 'size' and get an element from that pool.
*
* @param size the size in bytes of the memory needed
* @return a pointer to the allocated memory or NULL if the pool is empty
*/
void *mem_malloc(mem_size_t size) {
void *ret;
struct memp_malloc_helper *element;
memp_t poolnr;
mem_size_t required_size =
size + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
for (poolnr = MEMP_POOL_FIRST; poolnr <= MEMP_POOL_LAST;
poolnr = (memp_t)(poolnr + 1)) {
#if MEM_USE_POOLS_TRY_BIGGER_POOL
again:
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
/* is this pool big enough to hold an element of the required size
plus a struct memp_malloc_helper that saves the pool this element came
from? */
if (required_size <= memp_sizes[poolnr]) {
break;
}
}
if (poolnr > MEMP_POOL_LAST) {
LWIP_ASSERT("mem_malloc(): no pool is that big!", 0);
return NULL;
}
element = (struct memp_malloc_helper *) memp_malloc(poolnr);
if (element == NULL) {
/* No need to DEBUGF or ASSERT: This error is already
taken care of in memp.c */
#if MEM_USE_POOLS_TRY_BIGGER_POOL
/** Try a bigger pool if this one is empty! */
if (poolnr < MEMP_POOL_LAST) {
poolnr++;
goto again;
}
#endif /* MEM_USE_POOLS_TRY_BIGGER_POOL */
return NULL;
}
/* save the pool number this element came from */
element->poolnr = poolnr;
/* and return a pointer to the memory directly after the struct
* memp_malloc_helper */
ret =
(u8_t *) element + LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper));
return ret;
}
Example 7: lwip_comm_mem_malloc
// lwIP core: memory allocation
// Return value: 0 on success;
//               nonzero on failure
u8 lwip_comm_mem_malloc(void)
{
u32 mempsize;
u32 ramheapsize;
mempsize=memp_get_memorysize(); //size of the memp_memory array
memp_memory=mymalloc(SRAMIN,mempsize); //allocate memory for memp_memory
ramheapsize=LWIP_MEM_ALIGN_SIZE(MEM_SIZE)+2*LWIP_MEM_ALIGN_SIZE(4*3)+MEM_ALIGNMENT;//size of the ram heap
ram_heap=mymalloc(SRAMIN,ramheapsize); //allocate memory for ram_heap
TCPIP_THREAD_TASK_STK=mymalloc(SRAMIN,TCPIP_THREAD_STACKSIZE*4);//allocate the stack for the TCP/IP core task
LWIP_DHCP_TASK_STK=mymalloc(SRAMIN,LWIP_DHCP_STK_SIZE*4); //allocate the stack for the DHCP task
if(!memp_memory||!ram_heap||!TCPIP_THREAD_TASK_STK||!LWIP_DHCP_TASK_STK)//any allocation failed
{
lwip_comm_mem_free();
return 1;
}
return 0;
}
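The ram_heap size computed above mirrors how mem.c pads its static heap: MEM_SIZE rounded up, plus room for two struct mem sentinels (the 4*3 term presumably stands for the three 32-bit fields of struct mem), plus alignment slack. A worked sketch with MEM_ALIGNMENT == 4 and MEM_SIZE == 10*1024:
/* LWIP_MEM_ALIGN_SIZE(10240)     = 10240
 * 2 * LWIP_MEM_ALIGN_SIZE(4 * 3) =    24
 * + MEM_ALIGNMENT                =     4
 * ramheapsize                    = 10268 bytes requested from mymalloc(SRAMIN, ...) */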
Example 8: pbuf_alloced_custom
/**
* @ingroup pbuf
* Initialize a custom pbuf (already allocated).
* Example of custom pbuf usage: @ref zerocopyrx
*
* @param l header size
* @param length size of the pbuf's payload
* @param type type of the pbuf (only used to treat the pbuf accordingly, as
* this function allocates no memory)
* @param p pointer to the custom pbuf to initialize (already allocated)
* @param payload_mem pointer to the buffer that is used for payload and headers,
* must be at least big enough to hold 'length' plus the header size,
* may be NULL if set later.
* ATTENTION: The caller is responsible for correct alignment of this buffer!!
* @param payload_mem_len the size of the 'payload_mem' buffer, must be at least
* big enough to hold 'length' plus the header size
*/
struct pbuf *
pbuf_alloced_custom(pbuf_layer l, u16_t length, pbuf_type type, struct pbuf_custom *p,
void *payload_mem, u16_t payload_mem_len)
{
u16_t offset = (u16_t)l;
void *payload;
LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloced_custom(length=%"U16_F")\n", length));
if (LWIP_MEM_ALIGN_SIZE(offset) + length > payload_mem_len) {
LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_LEVEL_WARNING, ("pbuf_alloced_custom(length=%"U16_F") buffer too short\n", length));
return NULL;
}
if (payload_mem != NULL) {
payload = (u8_t *)payload_mem + LWIP_MEM_ALIGN_SIZE(offset);
} else {
payload = NULL;
}
pbuf_init_alloced_pbuf(&p->pbuf, payload, length, length, type, PBUF_FLAG_IS_CUSTOM);
return &p->pbuf;
}
Example 9: lwip_comm_mem_malloc
// lwIP core: memory allocation
// Return value: 0 on success;
//               nonzero on failure
uint8_t lwip_comm_mem_malloc(void)
{
uint32_t mempsize;
uint32_t ramheapsize;
mempsize=memp_get_memorysize(); //size of the memp_memory array
memp_memory=mymalloc(SRAMEX,mempsize); //allocate memory for memp_memory
printf("memp_memory size: %d\r\n",mempsize);
ramheapsize=LWIP_MEM_ALIGN_SIZE(MEM_SIZE)+2*LWIP_MEM_ALIGN_SIZE(4*3)+MEM_ALIGNMENT;//size of the ram heap
ram_heap=mymalloc(SRAMEX,ramheapsize); //allocate memory for ram_heap
printf("ram_heap size: %d\r\n",ramheapsize);
TCPIP_THREAD_TASK_STK=mymalloc(SRAMEX,TCPIP_THREAD_STACKSIZE*4); //allocate the stack for the TCP/IP core task
//LWIP_DHCP_TASK_STK=mymalloc(SRAMEX,LWIP_DHCP_STK_SIZE*4); //allocate the stack for the DHCP task
if(!memp_memory) {
lwip_comm_mem_free();
return 1;
}
if(!ram_heap) {
lwip_comm_mem_free();
return 1;
}
if(!TCPIP_THREAD_TASK_STK) {
lwip_comm_mem_free();
return 1;
}
// if((!memp_memory)||(!ram_heap)||(!TCPIP_THREAD_TASK_STK));//||!LWIP_DHCP_TASK_STK) //any allocation failed
// {
// lwip_comm_mem_free();
// return 1;
// }
return 0;
}
Example 10: tcp_pbuf_prealloc
static struct pbuf *ICACHE_FLASH_ATTR
tcp_pbuf_prealloc(pbuf_layer layer, u16_t length, u16_t max_length,
u16_t *oversize, struct tcp_pcb *pcb, u8_t apiflags,
u8_t first_seg)
{
struct pbuf *p;
u16_t alloc = length;
#if LWIP_NETIF_TX_SINGLE_PBUF
LWIP_UNUSED_ARG(max_length);
LWIP_UNUSED_ARG(pcb);
LWIP_UNUSED_ARG(apiflags);
LWIP_UNUSED_ARG(first_seg);
/* always create MSS-sized pbufs */
alloc = TCP_MSS;
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
if (length < max_length) {
/* Should we allocate an oversized pbuf, or just the minimum
* length required? If tcp_write is going to be called again
* before this segment is transmitted, we want the oversized
* buffer. If the segment will be transmitted immediately, we can
* save memory by allocating only length. We use a simple
* heuristic based on the following information:
*
* Did the user set TCP_WRITE_FLAG_MORE?
*
* Will the Nagle algorithm defer transmission of this segment?
*/
if ((apiflags & TCP_WRITE_FLAG_MORE) ||
(!(pcb->flags & TF_NODELAY) &&
(!first_seg ||
pcb->unsent != NULL ||
pcb->unacked != NULL))) {
alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(length + TCP_OVERSIZE));
}
}
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
p = pbuf_alloc(layer, alloc, PBUF_RAM);
if (p == NULL) {
return NULL;
}
LWIP_ASSERT("need unchained pbuf", p->next == NULL);
*oversize = p->len - length;
/* trim p->len to the currently used size */
p->len = p->tot_len = length;
return p;
}
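A worked sketch of the oversize heuristic in the non-LWIP_NETIF_TX_SINGLE_PBUF branch, assuming TCP_MSS == 1460, TCP_OVERSIZE == TCP_MSS and MEM_ALIGNMENT == 4:
/* tcp_write() queues 100 bytes with TCP_WRITE_FLAG_MORE, max_length == 1460:
 *   LWIP_MEM_ALIGN_SIZE(100 + 1460) = 1560
 *   alloc = LWIP_MIN(1460, 1560)    = 1460   -> a full-MSS PBUF_RAM pbuf is allocated
 *   *oversize = 1460 - 100          = 1360   -> spare room for the next tcp_write() call */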
Example 11: mem_free
/**
* Free memory previously allocated by mem_malloc. Loads the pool number
* and calls memp_free with that pool number to put the element back into
* its pool
*
* @param rmem the memory element to free
*/
void
mem_free(void *rmem)
{
struct memp_malloc_helper *hmem;
LWIP_ASSERT("rmem != NULL", (rmem != NULL));
LWIP_ASSERT("rmem == MEM_ALIGN(rmem)", (rmem == LWIP_MEM_ALIGN(rmem)));
/* get the original struct memp_malloc_helper */
hmem = (struct memp_malloc_helper*)(void*)((u8_t*)rmem - LWIP_MEM_ALIGN_SIZE(sizeof(struct memp_malloc_helper)));
LWIP_ASSERT("hmem != NULL", (hmem != NULL));
LWIP_ASSERT("hmem == MEM_ALIGN(hmem)", (hmem == LWIP_MEM_ALIGN(hmem)));
LWIP_ASSERT("hmem->poolnr < MEMP_MAX", (hmem->poolnr < MEMP_MAX));
/* and put it in the pool we saved earlier */
memp_free(hmem->poolnr, hmem);
}
Example 12: mem_init
/**
* Zero the heap and initialize start, end and lowest-free
*/
void
mem_init(LWIP_MEM_CFG *mem_cfg)
{
struct mem *mem;
BOOL en = mem_cfg->enable;
UINT8 *start = mem_cfg->start;
UINT32 length = LWIP_MEM_ALIGN_SIZE(mem_cfg->length);
LWIP_ASSERT("Sanity check alignment",
(SIZEOF_STRUCT_MEM & (MEM_ALIGNMENT-1)) == 0);
if(en == TRUE)
{
ram_heap = (UINT8 *)start;
MEM_SIZE_ALIGNED = (length - (2*SIZEOF_STRUCT_MEM) - MEM_ALIGNMENT);
}
else
{
ram_heap = (UINT8 *)MALLOC(MEM_SIZE);
MEM_SIZE_ALIGNED = (MEM_SIZE - (2*SIZEOF_STRUCT_MEM) - MEM_ALIGNMENT);
}
/* align the heap */
ram = LWIP_MEM_ALIGN(ram_heap);
/* initialize the start of the heap */
mem = (struct mem *)ram;
mem->next = MEM_SIZE_ALIGNED;
mem->prev = 0;
mem->used = 0;
/* initialize the end of the heap */
ram_end = (struct mem *)&ram[MEM_SIZE_ALIGNED];
ram_end->used = 1;
ram_end->next = MEM_SIZE_ALIGNED;
ram_end->prev = MEM_SIZE_ALIGNED;
mem_sem = sys_sem_new(1);
/* initialize the lowest-free pointer to the start of the heap */
lfree = (struct mem *)ram;
#if MEM_STATS
lwip_stats.mem.avail = MEM_SIZE_ALIGNED;
#endif /* MEM_STATS */
}
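A hypothetical bring-up sketch for this vendor variant; the only assumptions are the three LWIP_MEM_CFG fields actually read above (enable, start, length) and an illustrative external buffer name:
static UINT8 ext_heap[16 * 1024];   /* ideally MEM_ALIGNMENT-aligned; mem_init() aligns the start anyway */
LWIP_MEM_CFG cfg;
cfg.enable = TRUE;        /* use the caller-supplied region instead of MALLOC(MEM_SIZE) */
cfg.start  = ext_heap;
cfg.length = sizeof(ext_heap);
mem_init(&cfg);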
Example 13: pbuf_alloc
/**
* @ingroup pbuf
* Allocates a pbuf of the given type (possibly a chain for PBUF_POOL type).
*
* The actual memory allocated for the pbuf is determined by the
* layer at which the pbuf is allocated and the requested size
* (from the size parameter).
*
* @param layer flag to define header size
* @param length size of the pbuf's payload
* @param type this parameter decides how and where the pbuf
* should be allocated as follows:
*
* - PBUF_RAM: buffer memory for pbuf is allocated as one large
* chunk. This includes protocol headers as well.
* - PBUF_ROM: no buffer memory is allocated for the pbuf, even for
* protocol headers. Additional headers must be prepended
* by allocating another pbuf and chaining it to the front of
* the ROM pbuf. It is assumed that the memory used is really
* similar to ROM in that it is immutable and will not be
* changed. Memory which is dynamic should generally not
* be attached to PBUF_ROM pbufs. Use PBUF_REF instead.
* - PBUF_REF: no buffer memory is allocated for the pbuf, even for
* protocol headers. It is assumed that the pbuf is only
* being used in a single thread. If the pbuf gets queued,
* then pbuf_take should be called to copy the buffer.
* - PBUF_POOL: the pbuf is allocated as a pbuf chain, with pbufs from
* the pbuf pool that is allocated during pbuf_init().
*
* @return the allocated pbuf. If multiple pbufs were allocated, this
* is the first pbuf of a pbuf chain.
*/
struct pbuf *
pbuf_alloc(pbuf_layer layer, u16_t length, pbuf_type type)
{
struct pbuf *p, *q, *r;
u16_t offset;
s32_t rem_len; /* remaining length */
LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc(length=%"U16_F")\n", length));
/* determine header offset */
switch (layer) {
case PBUF_TRANSPORT:
/* add room for transport (often TCP) layer header */
offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN + PBUF_TRANSPORT_HLEN;
break;
case PBUF_IP:
/* add room for IP layer header */
offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN + PBUF_IP_HLEN;
break;
case PBUF_LINK:
/* add room for link layer header */
offset = PBUF_LINK_ENCAPSULATION_HLEN + PBUF_LINK_HLEN;
break;
case PBUF_RAW_TX:
/* add room for encapsulating link layer headers (e.g. 802.11) */
offset = PBUF_LINK_ENCAPSULATION_HLEN;
break;
case PBUF_RAW:
/* no offset (e.g. RX buffers or chain successors) */
offset = 0;
break;
default:
LWIP_ASSERT("pbuf_alloc: bad pbuf layer", 0);
return NULL;
}
switch (type) {
case PBUF_POOL:
/* allocate head of pbuf chain into p */
p = (struct pbuf *)memp_malloc(MEMP_PBUF_POOL);
LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_alloc: allocated pbuf %p\n", (void *)p));
if (p == NULL) {
PBUF_POOL_IS_EMPTY();
return NULL;
}
p->type = type;
p->next = NULL;
p->if_idx = NETIF_NO_INDEX;
/* make the payload pointer point 'offset' bytes into pbuf data memory */
p->payload = LWIP_MEM_ALIGN((void *)((u8_t *)p + (SIZEOF_STRUCT_PBUF + offset)));
LWIP_ASSERT("pbuf_alloc: pbuf p->payload properly aligned",
((mem_ptr_t)p->payload % MEM_ALIGNMENT) == 0);
/* the total length of the pbuf chain is the requested size */
p->tot_len = length;
/* set the length of the first pbuf in the chain */
p->len = LWIP_MIN(length, PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset));
LWIP_ASSERT("check p->payload + p->len does not overflow pbuf",
((u8_t*)p->payload + p->len <=
(u8_t*)p + SIZEOF_STRUCT_PBUF + PBUF_POOL_BUFSIZE_ALIGNED));
LWIP_ASSERT("PBUF_POOL_BUFSIZE must be bigger than MEM_ALIGNMENT",
(PBUF_POOL_BUFSIZE_ALIGNED - LWIP_MEM_ALIGN_SIZE(offset)) > 0 );
/* set reference count (needed here in case we fail) */
p->ref = 1;
/* now allocate the tail of the pbuf chain */
/* remember first pbuf for linkage in next iteration */
r = p;
//......... remainder of this example omitted .........
Example 14: mem_malloc
/**
* Adam's mem_malloc() plus solution for bug #17922
* Allocate a block of memory with a minimum of 'size' bytes.
*
* @param size is the minimum size of the requested block in bytes.
* @return pointer to allocated memory or NULL if no free memory was found.
*
* Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
*/
void *
mem_malloc(mem_size_t size)
{
mem_size_t ptr, ptr2;
struct mem *mem, *mem2;
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
u8_t local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
LWIP_MEM_ALLOC_DECL_PROTECT();
if (size == 0) {
return NULL;
}
/* Expand the size of the allocated memory region so that we can
adjust for alignment. */
size = LWIP_MEM_ALIGN_SIZE(size);
if(size < MIN_SIZE_ALIGNED) {
/* every data block must be at least MIN_SIZE_ALIGNED long */
size = MIN_SIZE_ALIGNED;
}
if (size > MEM_SIZE_ALIGNED) {
return NULL;
}
/* protect the heap from concurrent access */
sys_mutex_lock(&mem_mutex);
LWIP_MEM_ALLOC_PROTECT();
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
/* run as long as a mem_free disturbed mem_malloc or mem_trim */
do {
local_mem_free_count = 0;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
/* Scan through the heap searching for a free block that is big enough,
* beginning with the lowest free block.
*/
for (ptr = (mem_size_t)((u8_t *)lfree - ram); ptr < MEM_SIZE_ALIGNED - size;
ptr = ((struct mem *)(void *)&ram[ptr])->next) {
mem = (struct mem *)(void *)&ram[ptr];
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
mem_free_count = 0;
LWIP_MEM_ALLOC_UNPROTECT();
/* allow mem_free or mem_trim to run */
LWIP_MEM_ALLOC_PROTECT();
if (mem_free_count != 0) {
/* If mem_free or mem_trim have run, we have to restart since they
could have altered our current struct mem. */
local_mem_free_count = 1;
break;
}
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
if ((!mem->used) &&
(mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
/* mem is not used and at least perfect fit is possible:
* mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */
if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
/* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
* at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
* -> split large block, create empty remainder,
* remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
* mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
* struct mem would fit in but no data between mem2 and mem2->next
* @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
* region that couldn't hold data, but when mem->next gets freed,
* the 2 regions would be combined, resulting in more free memory
*/
ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
/* create mem2 struct */
mem2 = (struct mem *)(void *)&ram[ptr2];
mem2->used = 0;
mem2->next = mem->next;
mem2->prev = ptr;
/* and insert it between mem and mem->next */
mem->next = ptr2;
mem->used = 1;
if (mem2->next != MEM_SIZE_ALIGNED) {
((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
}
MEM_STATS_INC_USED(used, (size + SIZEOF_STRUCT_MEM));
} else {
/* (a mem2 struct does not fit into the user data space of mem, and mem->next will always
* be used at this point: if not, we would have 2 unused structs in a row; plug_holes should have
* taken care of this).
* -> near fit or exact fit: do not split, no mem2 creation
* also can't move mem->next directly behind mem, since mem->next
//......... remainder of this example omitted .........
Example 15: mem_trim
/**
* Shrink memory returned by mem_malloc().
*
* @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
* @param newsize required size after shrinking (needs to be smaller than or
* equal to the previous size)
* @return for compatibility reasons: is always == rmem, at the moment
* or NULL if newsize is > old size, in which case rmem is NOT touched
* or freed!
*/
void *
mem_trim(void *rmem, mem_size_t newsize)
{
mem_size_t size;
mem_size_t ptr, ptr2;
struct mem *mem, *mem2;
/* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
LWIP_MEM_FREE_DECL_PROTECT();
/* Expand the size of the allocated memory region so that we can
adjust for alignment. */
newsize = LWIP_MEM_ALIGN_SIZE(newsize);
if(newsize < MIN_SIZE_ALIGNED) {
/* every data block must be at least MIN_SIZE_ALIGNED long */
newsize = MIN_SIZE_ALIGNED;
}
if (newsize > MEM_SIZE_ALIGNED) {
return NULL;
}
LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
(u8_t *)rmem < (u8_t *)ram_end);
if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
SYS_ARCH_DECL_PROTECT(lev);
LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
/* protect mem stats from concurrent access */
SYS_ARCH_PROTECT(lev);
MEM_STATS_INC(illegal);
SYS_ARCH_UNPROTECT(lev);
return rmem;
}
/* Get the corresponding struct mem ... */
mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
/* ... and its offset pointer */
ptr = (mem_size_t)((u8_t *)mem - ram);
size = mem->next - ptr - SIZEOF_STRUCT_MEM;
LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
if (newsize > size) {
/* not supported */
return NULL;
}
if (newsize == size) {
/* No change in size, simply return */
return rmem;
}
/* protect the heap from concurrent access */
LWIP_MEM_FREE_PROTECT();
mem2 = (struct mem *)(void *)&ram[mem->next];
if(mem2->used == 0) {
/* The next struct is unused, we can simply move it a little */
mem_size_t next;
/* remember the old next pointer */
next = mem2->next;
/* create new struct mem which is moved directly after the shrunk mem */
ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
if (lfree == mem2) {
lfree = (struct mem *)(void *)&ram[ptr2];
}
mem2 = (struct mem *)(void *)&ram[ptr2];
mem2->used = 0;
/* restore the next pointer */
mem2->next = next;
/* link it back to mem */
mem2->prev = ptr;
/* link mem to it */
mem->next = ptr2;
/* last thing to restore linked list: as we have moved mem2,
* let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
* the end of the heap */
if (mem2->next != MEM_SIZE_ALIGNED) {
((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
}
MEM_STATS_DEC_USED(used, (size - newsize));
/* no need to plug holes, we've already done that */
} else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
/* Next struct is used but there's room for another struct mem with
* at least MIN_SIZE_ALIGNED of data.
* Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
* ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
* @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
* region that couldn't hold data, but when mem->next gets freed,
* the 2 regions would be combined, resulting in more free memory */
ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
mem2 = (struct mem *)(void *)&ram[ptr2];
//......... remainder of this example omitted .........
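A minimal usage sketch that follows from the contract documented above (shrink only, same pointer returned):
void *buf = mem_malloc(1500);
if (buf != NULL) {
    buf = mem_trim(buf, 600);   /* returns the same pointer; the freed tail rejoins the heap */
    /* ... use at most 600 bytes from here on ... */
    mem_free(buf);
}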