This article collects typical usage examples of the C++ UNIV_UNLIKELY macro from the InnoDB storage engine source code. If you are unsure how UNIV_UNLIKELY is used in practice, or are looking for concrete examples of it, the selected code samples below may help.
A total of 15 UNIV_UNLIKELY code examples are shown, sorted by popularity by default.
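Before the examples, a brief note on what the macro is: UNIV_UNLIKELY (together with UNIV_LIKELY and UNIV_LIKELY_NULL) is a branch-prediction hint defined in InnoDB's univ.i. On GCC-compatible compilers it expands to __builtin_expect, telling the compiler which way a condition is expected to go so the common path stays fall-through; on other compilers it degrades to the bare condition. The following is a simplified, self-contained sketch, not the verbatim InnoDB header (the real header uses InnoDB's TRUE/FALSE constants and covers more compilers), and copy_len is a hypothetical helper added purely for illustration:

#include <cstddef>
#include <cstdio>

/* Simplified sketch of InnoDB's hint macros (see univ.i). */
#if defined(__GNUC__)
# define UNIV_EXPECT(expr, constant)  __builtin_expect(expr, constant)
#else
# define UNIV_EXPECT(expr, constant)  (expr)  /* fallback: no hint */
#endif

#define UNIV_LIKELY(cond)    UNIV_EXPECT(cond, 1)  /* branch expected taken */
#define UNIV_UNLIKELY(cond)  UNIV_EXPECT(cond, 0)  /* branch expected not taken */

/* Hypothetical caller: the NULL check is the rare error path, so it is
   wrapped in UNIV_UNLIKELY to keep the common path branch-predictor friendly. */
static int copy_len(const char* src)
{
    if (UNIV_UNLIKELY(src == NULL)) {
        return -1;            /* rare: bad input */
    }
    int len = 0;
    while (src[len] != '\0') {
        len++;                /* common path */
    }
    return len;
}

int main()
{
    std::printf("%d\n", copy_len("InnoDB"));
    return 0;
}

The examples below follow the same pattern: error checks, corruption checks, and other rarely taken branches are wrapped in UNIV_UNLIKELY, while the expected fast path is wrapped in UNIV_LIKELY.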
Example 1: dict_mem_table_add_col
/**********************************************************************//**
Adds a column definition to a table. */
UNIV_INTERN
void
dict_mem_table_add_col(
/*===================*/
dict_table_t* table, /*!< in: table */
mem_heap_t* heap, /*!< in: temporary memory heap, or NULL */
const char* name, /*!< in: column name, or NULL */
ulint mtype, /*!< in: main datatype */
ulint prtype, /*!< in: precise type */
ulint len) /*!< in: precision */
{
dict_col_t* col;
#ifndef UNIV_HOTBACKUP
ulint mbminlen;
ulint mbmaxlen;
#endif /* !UNIV_HOTBACKUP */
ulint i;
ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(!heap == !name);
i = table->n_def++;
if (name) {
if (UNIV_UNLIKELY(table->n_def == table->n_cols)) {
heap = table->heap;
}
if (UNIV_LIKELY(i) && UNIV_UNLIKELY(!table->col_names)) {
/* All preceding column names are empty. */
char* s = mem_heap_zalloc(heap, table->n_def);
table->col_names = s;
}
table->col_names = dict_add_col_name(table->col_names,
i, name, heap);
}
col = dict_table_get_nth_col(table, i);
col->ind = (unsigned int) i;
col->ord_part = 0;
col->mtype = (unsigned int) mtype;
col->prtype = (unsigned int) prtype;
col->len = (unsigned int) len;
#ifndef UNIV_HOTBACKUP
dtype_get_mblen(mtype, prtype, &mbminlen, &mbmaxlen);
col->mbminlen = (unsigned int) mbminlen;
col->mbmaxlen = (unsigned int) mbmaxlen;
#endif /* !UNIV_HOTBACKUP */
}
Example 2: row_undo_ins_parse_undo_rec
/***********************************************************//**
Parses the row reference and other info in a fresh insert undo record. */
static
void
row_undo_ins_parse_undo_rec(
/*========================*/
ib_recovery_t recovery, /*!< in: recovery flag */
undo_node_t* node) /*!< in/out: row undo node */
{
dict_index_t* clust_index;
byte* ptr;
undo_no_t undo_no;
dulint table_id;
ulint type;
ulint dummy;
ibool dummy_extern;
ut_ad(node);
ptr = trx_undo_rec_get_pars(node->undo_rec, &type, &dummy,
&dummy_extern, &undo_no, &table_id);
ut_ad(type == TRX_UNDO_INSERT_REC);
node->rec_type = type;
node->update = NULL;
node->table = dict_table_get_on_id(
srv_force_recovery, table_id, node->trx);
/* Skip the UNDO if we can't find the table or the .ibd file. */
if (UNIV_UNLIKELY(node->table == NULL)) {
} else if (UNIV_UNLIKELY(node->table->ibd_file_missing)) {
node->table = NULL;
} else {
clust_index = dict_table_get_first_index(node->table);
if (clust_index != NULL) {
ptr = trx_undo_rec_get_row_ref(
ptr, clust_index, &node->ref, node->heap);
} else {
ut_print_timestamp(ib_stream);
ib_logger(ib_stream, " InnoDB: table ");
ut_print_name(ib_stream, node->trx, TRUE,
node->table->name);
ib_logger(ib_stream, " has no indexes, "
"ignoring the table\n");
node->table = NULL;
}
}
}
Example 3: row_ext_cache_fill
/********************************************************************//**
Fills the column prefix cache of an externally stored column. */
static
void
row_ext_cache_fill(
/*===============*/
row_ext_t* ext, /*!< in/out: column prefix cache */
ulint i, /*!< in: index of ext->ext[] */
ulint zip_size,/*!< compressed page size in bytes, or 0 */
const dfield_t* dfield) /*!< in: data field */
{
const byte* field = dfield_get_data(dfield);
ulint f_len = dfield_get_len(dfield);
byte* buf = ext->buf + i * ext->max_len;
ut_ad(ext->max_len > 0);
ut_ad(i < ext->n_ext);
ut_ad(dfield_is_ext(dfield));
ut_a(f_len >= BTR_EXTERN_FIELD_REF_SIZE);
if (UNIV_UNLIKELY(!memcmp(field_ref_zero,
field + f_len - BTR_EXTERN_FIELD_REF_SIZE,
BTR_EXTERN_FIELD_REF_SIZE))) {
/* The BLOB pointer is not set: we cannot fetch it */
ext->len[i] = 0;
} else {
/* Fetch at most ext->max_len of the column.
The column should be non-empty. However,
trx_rollback_or_clean_all_recovered() may try to
access a half-deleted BLOB if the server previously
crashed during the execution of
btr_free_externally_stored_field(). */
ext->len[i] = btr_copy_externally_stored_field_prefix(
buf, ext->max_len, zip_size, field, f_len);
}
}
Example 4: dict_drop_index_tree
/*******************************************************************//**
Drops the index tree associated with a row in SYS_INDEXES table. */
UNIV_INTERN
void
dict_drop_index_tree(
/*=================*/
rec_t* rec, /*!< in/out: record in the clustered index
of SYS_INDEXES table */
mtr_t* mtr) /*!< in: mtr having the latch on the record page */
{
ulint root_page_no;
ulint space;
ulint zip_size;
const byte* ptr;
ulint len;
ut_ad(mutex_own(&(dict_sys->mutex)));
ut_a(!dict_table_is_comp(dict_sys->sys_indexes));
ptr = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, &len);
ut_ad(len == 4);
root_page_no = mtr_read_ulint(ptr, MLOG_4BYTES, mtr);
if (root_page_no == FIL_NULL) {
/* The tree has already been freed */
return;
}
ptr = rec_get_nth_field_old(rec,
DICT_SYS_INDEXES_SPACE_NO_FIELD, &len);
ut_ad(len == 4);
space = mtr_read_ulint(ptr, MLOG_4BYTES, mtr);
zip_size = fil_space_get_zip_size(space);
if (UNIV_UNLIKELY(zip_size == ULINT_UNDEFINED)) {
/* It is a single table tablespace and the .ibd file is
missing: do nothing */
return;
}
/* We free all the pages but the root page first; this operation
may span several mini-transactions */
btr_free_but_not_root(space, zip_size, root_page_no);
/* Then we free the root page in the same mini-transaction where
we write FIL_NULL to the appropriate field in the SYS_INDEXES
record: this mini-transaction marks the B-tree totally freed */
/* printf("Dropping index tree in space %lu root page %lu\n", space,
root_page_no); */
btr_free_root(space, zip_size, root_page_no, mtr);
page_rec_write_index_page_no(rec,
DICT_SYS_INDEXES_PAGE_NO_FIELD,
FIL_NULL, mtr);
}
Example 5: mlog_parse_string
/********************************************************//**
Parses a log record written by mlog_write_string.
@return parsed record end, NULL if not a complete record */
UNIV_INTERN
byte*
mlog_parse_string(
/*==============*/
byte* ptr, /*!< in: buffer */
byte* end_ptr,/*!< in: buffer end */
byte* page, /*!< in: page where to apply the log record, or NULL */
void* page_zip)/*!< in/out: compressed page, or NULL */
{
ulint offset;
ulint len;
ut_a(!page || !page_zip || fil_page_get_type(page) != FIL_PAGE_INDEX);
if (end_ptr < ptr + 4) {
return(NULL);
}
offset = mach_read_from_2(ptr);
ptr += 2;
len = mach_read_from_2(ptr);
ptr += 2;
if (UNIV_UNLIKELY(offset >= UNIV_PAGE_SIZE)
|| UNIV_UNLIKELY(len + offset > UNIV_PAGE_SIZE)) {
recv_sys->found_corrupt_log = TRUE;
return(NULL);
}
if (end_ptr < ptr + len) {
return(NULL);
}
if (page) {
if (UNIV_LIKELY_NULL(page_zip)) {
memcpy(((page_zip_des_t*) page_zip)->data
+ offset, ptr, len);
}
memcpy(page + offset, ptr, len);
}
return(ptr + len);
}
Example 6: row_undo_mod_upd_del_sec
/***********************************************************//**
Undoes a modify in secondary indexes when undo record type is UPD_DEL.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
static
ulint
row_undo_mod_upd_del_sec(
/*=====================*/
undo_node_t* node, /*!< in: row undo node */
que_thr_t* thr) /*!< in: query thread */
{
mem_heap_t* heap;
dtuple_t* entry;
dict_index_t* index;
ulint err = DB_SUCCESS;
ut_ad(node->rec_type == TRX_UNDO_UPD_DEL_REC);
heap = mem_heap_create(1024);
while (node->index != NULL) {
/* Skip all corrupted secondary index */
dict_table_skip_corrupt_index(node->index);
if (!node->index) {
break;
}
index = node->index;
entry = row_build_index_entry(node->row, node->ext,
index, heap);
if (UNIV_UNLIKELY(!entry)) {
/* The database must have crashed after
inserting a clustered index record but before
writing all the externally stored columns of
that record. Because secondary index entries
are inserted after the clustered index record,
we may assume that the secondary index record
does not exist. However, this situation may
only occur during the rollback of incomplete
transactions. */
ut_a(thr_is_recv(thr));
} else {
err = row_undo_mod_del_mark_or_remove_sec(
node, thr, index, entry);
if (err != DB_SUCCESS) {
break;
}
}
mem_heap_empty(heap);
node->index = dict_table_get_next_index(node->index);
}
mem_heap_free(heap);
return(err);
}
Example 7: row_undo_ins
/***********************************************************//**
Undoes a fresh insert of a row to a table. A fresh insert means that
the same clustered index unique key did not have any record, even delete
marked, at the time of the insert. InnoDB is eager in a rollback:
if it figures out that an index record will be removed in the purge
anyway, it will remove it in the rollback.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
UNIV_INTERN
ulint
row_undo_ins(
/*=========*/
undo_node_t* node) /*!< in: row undo node */
{
ut_ad(node);
ut_ad(node->state == UNDO_NODE_INSERT);
row_undo_ins_parse_undo_rec(node);
if (!node->table || !row_undo_search_clust_to_pcur(node)) {
trx_undo_rec_release(node->trx, node->undo_no);
return(DB_SUCCESS);
}
/* Iterate over all the indexes and undo the insert.*/
/* Skip the clustered index (the first index) */
node->index = dict_table_get_next_index(
dict_table_get_first_index(node->table));
dict_table_skip_corrupt_index(node->index);
while (node->index != NULL) {
dtuple_t* entry;
ulint err;
entry = row_build_index_entry(node->row, node->ext,
node->index, node->heap);
if (UNIV_UNLIKELY(!entry)) {
/* The database must have crashed after
inserting a clustered index record but before
writing all the externally stored columns of
that record. Because secondary index entries
are inserted after the clustered index record,
we may assume that the secondary index record
does not exist. However, this situation may
only occur during the rollback of incomplete
transactions. */
ut_a(trx_is_recv(node->trx));
} else {
log_free_check();
err = row_undo_ins_remove_sec(node->index, entry);
if (err != DB_SUCCESS) {
return(err);
}
}
dict_table_next_uncorrupted_index(node->index);
}
log_free_check();
return(row_undo_ins_remove_clust_rec(node));
}
Example 8: row_undo_ins
/***********************************************************//**
Undoes a fresh insert of a row to a table. A fresh insert means that
the same clustered index unique key did not have any record, even delete
marked, at the time of the insert. InnoDB is eager in a rollback:
if it figures out that an index record will be removed in the purge
anyway, it will remove it in the rollback.
@return DB_SUCCESS or DB_OUT_OF_FILE_SPACE */
UNIV_INTERN
ulint
row_undo_ins(
/*=========*/
undo_node_t* node) /*!< in: row undo node */
{
ut_ad(node);
ut_ad(node->state == UNDO_NODE_INSERT);
row_undo_ins_parse_undo_rec(node);
if (!node->table || !row_undo_search_clust_to_pcur(node)) {
trx_undo_rec_release(node->trx, node->undo_no);
return(DB_SUCCESS);
}
/* Iterate over all the indexes and undo the insert.*/
/* Skip the clustered index (the first index) */
node->index = dict_table_get_next_index(
dict_table_get_first_index(node->table));
while (node->index != NULL) {
dtuple_t* entry;
ulint err;
entry = row_build_index_entry(node->row, node->ext,
node->index, node->heap);
if (UNIV_UNLIKELY(!entry)) {
/* The database must have crashed after
inserting a clustered index record but before
writing all the externally stored columns of
that record, or a statement is being rolled
back because an error occurred while storing
off-page columns.
Because secondary index entries are inserted
after the clustered index record, we may
assume that the secondary index record does
not exist. */
} else {
log_free_check();
err = row_undo_ins_remove_sec(node->index, entry);
if (err != DB_SUCCESS) {
return(err);
}
}
node->index = dict_table_get_next_index(node->index);
}
log_free_check();
return(row_undo_ins_remove_clust_rec(node));
}
Example 9: mlog_write_string
void
mlog_write_string(
/*==============*/
byte* ptr, /* in: pointer where to write */
const byte* str, /* in: string to write */
ulint len, /* in: string length */
mtr_t* mtr) /* in: mini-transaction handle */
{
byte* log_ptr;
if (UNIV_UNLIKELY(ptr < buf_pool->frame_zero)
|| UNIV_UNLIKELY(ptr >= buf_pool->high_end)) {
fprintf(stderr,
"InnoDB: Error: trying to write to"
" a stray memory location %p\n", (void*) ptr);
ut_error;
}
ut_ad(ptr && mtr);
ut_a(len < UNIV_PAGE_SIZE);
ut_memcpy(ptr, str, len);
log_ptr = mlog_open(mtr, 30);
/* If no logging is requested, we may return now */
if (log_ptr == NULL) {
return;
}
log_ptr = mlog_write_initial_log_record_fast(ptr, MLOG_WRITE_STRING,
log_ptr, mtr);
mach_write_to_2(log_ptr, ptr - buf_frame_align(ptr));
log_ptr += 2;
mach_write_to_2(log_ptr, len);
log_ptr += 2;
mlog_close(mtr, log_ptr);
mlog_catenate_string(mtr, str, len);
}
Example 10: mlog_write_dulint
void
mlog_write_dulint(
/*==============*/
byte* ptr, /* in: pointer where to write */
dulint val, /* in: value to write */
mtr_t* mtr) /* in: mini-transaction handle */
{
byte* log_ptr;
if (UNIV_UNLIKELY(ptr < buf_pool->frame_zero)
|| UNIV_UNLIKELY(ptr >= buf_pool->high_end)) {
fprintf(stderr,
"InnoDB: Error: trying to write to"
" a stray memory location %p\n", (void*) ptr);
ut_error;
}
ut_ad(ptr && mtr);
mach_write_to_8(ptr, val);
log_ptr = mlog_open(mtr, 11 + 2 + 9);
/* If no logging is requested, we may return now */
if (log_ptr == NULL) {
return;
}
log_ptr = mlog_write_initial_log_record_fast(ptr, MLOG_8BYTES,
log_ptr, mtr);
mach_write_to_2(log_ptr, ptr - buf_frame_align(ptr));
log_ptr += 2;
log_ptr += mach_dulint_write_compressed(log_ptr, val);
mlog_close(mtr, log_ptr);
}
Example 11: dict_mem_table_add_col
/**********************************************************************//**
Adds a column definition to a table. */
UNIV_INTERN
void
dict_mem_table_add_col(
/*===================*/
dict_table_t* table, /*!< in: table */
mem_heap_t* heap, /*!< in: temporary memory heap, or NULL */
const char* name, /*!< in: column name, or NULL */
ulint mtype, /*!< in: main datatype */
ulint prtype, /*!< in: precise type */
ulint len) /*!< in: precision */
{
dict_col_t* col;
ulint i;
ut_ad(table);
ut_ad(table->magic_n == DICT_TABLE_MAGIC_N);
ut_ad(!heap == !name);
i = table->n_def++;
if (name) {
if (UNIV_UNLIKELY(table->n_def == table->n_cols)) {
heap = table->heap;
}
if (UNIV_LIKELY(i) && UNIV_UNLIKELY(!table->col_names)) {
/* All preceding column names are empty. */
char* s = mem_heap_zalloc(heap, table->n_def);
table->col_names = s;
}
table->col_names = dict_add_col_name(table->col_names,
i, name, heap);
}
col = dict_table_get_nth_col(table, i);
dict_mem_fill_column_struct(col, i, mtype, prtype, len);
}
Example 12: os_fast_mutex_free
/**********************************************************//**
Frees a mutex object. */
UNIV_INTERN
void
os_fast_mutex_free(
/*===============*/
os_fast_mutex_t* fast_mutex) /*!< in: mutex to free */
{
#ifdef __WIN__
ut_a(fast_mutex);
DeleteCriticalSection((LPCRITICAL_SECTION) fast_mutex);
#else
int ret;
ret = pthread_mutex_destroy(fast_mutex);
if (UNIV_UNLIKELY(ret != 0)) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: error: return value %lu when calling\n"
"InnoDB: pthread_mutex_destroy().\n", (ulint)ret);
fprintf(stderr,
"InnoDB: Byte contents of the pthread mutex at %p:\n",
(void*) fast_mutex);
ut_print_buf(stderr, fast_mutex, sizeof(os_fast_mutex_t));
putc('\n', stderr);
}
#endif
if (UNIV_LIKELY(os_sync_mutex_inited)) {
/* When freeing the last mutexes, we have
already freed os_sync_mutex */
os_mutex_enter(os_sync_mutex);
}
ut_ad(os_fast_mutex_count > 0);
os_fast_mutex_count--;
if (UNIV_LIKELY(os_sync_mutex_inited)) {
os_mutex_exit(os_sync_mutex);
}
}
Example 13: btr_pcur_store_position
/**************************************************************//**
The position of the cursor is stored by taking an initial segment of the
record the cursor is positioned on, before, or after, and copying it to the
cursor data structure, or just setting a flag if the cursor is before the
first in an EMPTY tree, or after the last in an EMPTY tree. NOTE that the
page where the cursor is positioned must not be empty if the index tree is
not totally empty! */
UNIV_INTERN
void
btr_pcur_store_position(
/*====================*/
btr_pcur_t* cursor, /*!< in: persistent cursor */
mtr_t* mtr) /*!< in: mtr */
{
page_cur_t* page_cursor;
buf_block_t* block;
rec_t* rec;
dict_index_t* index;
page_t* page;
ulint offs;
ut_a(cursor->pos_state == BTR_PCUR_IS_POSITIONED);
ut_ad(cursor->latch_mode != BTR_NO_LATCHES);
block = btr_pcur_get_block(cursor);
if (srv_pass_corrupt_table && !block) {
return;
}
ut_a(block);
index = btr_cur_get_index(btr_pcur_get_btr_cur(cursor));
page_cursor = btr_pcur_get_page_cur(cursor);
rec = page_cur_get_rec(page_cursor);
page = page_align(rec);
offs = page_offset(rec);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_S_FIX)
|| mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_a(cursor->latch_mode != BTR_NO_LATCHES);
if (UNIV_UNLIKELY(page_get_n_recs(page) == 0)) {
/* It must be an empty index tree; NOTE that in this case
we do not store the modify_clock, but always do a search
if we restore the cursor position */
ut_a(btr_page_get_next(page, mtr) == FIL_NULL);
ut_a(btr_page_get_prev(page, mtr) == FIL_NULL);
ut_ad(page_is_leaf(page));
ut_ad(page_get_page_no(page) == index->page);
cursor->old_stored = BTR_PCUR_OLD_STORED;
if (page_rec_is_supremum_low(offs)) {
cursor->rel_pos = BTR_PCUR_AFTER_LAST_IN_TREE;
} else {
cursor->rel_pos = BTR_PCUR_BEFORE_FIRST_IN_TREE;
}
return;
}
if (page_rec_is_supremum_low(offs)) {
rec = page_rec_get_prev(rec);
cursor->rel_pos = BTR_PCUR_AFTER;
} else if (page_rec_is_infimum_low(offs)) {
rec = page_rec_get_next(rec);
cursor->rel_pos = BTR_PCUR_BEFORE;
} else {
cursor->rel_pos = BTR_PCUR_ON;
}
cursor->old_stored = BTR_PCUR_OLD_STORED;
cursor->old_rec = dict_index_copy_rec_order_prefix(
index, rec, &cursor->old_n_fields,
&cursor->old_rec_buf, &cursor->buf_size);
cursor->block_when_stored = block;
cursor->modify_clock = buf_block_get_modify_clock(block);
}
Example 14: fill_lock_data
/*******************************************************************//**
Fills the "lock_data" member of i_s_locks_row_t object.
If memory can not be allocated then FALSE is returned.
@return FALSE if allocation fails */
static
ibool
fill_lock_data(
/*===========*/
const char** lock_data,/*!< out: "lock_data" to fill */
const lock_t* lock, /*!< in: lock used to find the data */
ulint heap_no,/*!< in: rec num used to find the data */
trx_i_s_cache_t* cache) /*!< in/out: cache where to store
volatile data */
{
mtr_t mtr;
const buf_block_t* block;
const page_t* page;
const rec_t* rec;
ut_a(lock_get_type(lock) == LOCK_REC);
mtr_start(&mtr);
block = buf_page_try_get(lock_rec_get_space_id(lock),
lock_rec_get_page_no(lock),
&mtr);
if (block == NULL) {
*lock_data = NULL;
mtr_commit(&mtr);
return(TRUE);
}
page = (const page_t*) buf_block_get_frame(block);
rec = page_find_rec_with_heap_no(page, heap_no);
if (page_rec_is_infimum(rec)) {
*lock_data = ha_storage_put_str_memlim(
cache->storage, "infimum pseudo-record",
MAX_ALLOWED_FOR_STORAGE(cache));
} else if (page_rec_is_supremum(rec)) {
*lock_data = ha_storage_put_str_memlim(
cache->storage, "supremum pseudo-record",
MAX_ALLOWED_FOR_STORAGE(cache));
} else {
const dict_index_t* index;
ulint n_fields;
mem_heap_t* heap;
ulint offsets_onstack[REC_OFFS_NORMAL_SIZE];
ulint* offsets;
char buf[TRX_I_S_LOCK_DATA_MAX_LEN];
ulint buf_used;
ulint i;
rec_offs_init(offsets_onstack);
offsets = offsets_onstack;
index = lock_rec_get_index(lock);
n_fields = dict_index_get_n_unique(index);
ut_a(n_fields > 0);
heap = NULL;
offsets = rec_get_offsets(rec, index, offsets, n_fields,
&heap);
/* format and store the data */
buf_used = 0;
for (i = 0; i < n_fields; i++) {
buf_used += put_nth_field(
buf + buf_used, sizeof(buf) - buf_used,
i, index, rec, offsets) - 1;
}
*lock_data = (const char*) ha_storage_put_memlim(
cache->storage, buf, buf_used + 1,
MAX_ALLOWED_FOR_STORAGE(cache));
if (UNIV_UNLIKELY(heap != NULL)) {
/* this means that rec_get_offsets() has created a new
heap and has stored offsets in it; check that this is
really the case and free the heap */
ut_a(offsets != offsets_onstack);
mem_heap_free(heap);
}
}
mtr_commit(&mtr);
//......... rest of the code omitted .........
Example 15: mem_area_free
/********************************************************************//**
Frees memory to a pool. */
UNIV_INTERN
void
mem_area_free(
/*==========*/
void* ptr, /*!< in, own: pointer to allocated memory
buffer */
mem_pool_t* pool) /*!< in: memory pool */
{
mem_area_t* area;
mem_area_t* buddy;
void* new_ptr;
ulint size;
ulint n;
if (UNIV_LIKELY(srv_use_sys_malloc)) {
free(ptr);
return;
}
/* It may be that the area was really allocated from the OS with
regular malloc: check if ptr points within our memory pool */
if ((byte*)ptr < pool->buf || (byte*)ptr >= pool->buf + pool->size) {
ut_free(ptr);
return;
}
area = (mem_area_t*) (((byte*)ptr) - MEM_AREA_EXTRA_SIZE);
if (mem_area_get_free(area)) {
fprintf(stderr,
"InnoDB: Error: Freeing element to mem pool"
" free list though the\n"
"InnoDB: element is marked free!\n");
mem_analyze_corruption(area);
ut_error;
}
size = mem_area_get_size(area);
UNIV_MEM_FREE(ptr, size - MEM_AREA_EXTRA_SIZE);
if (size == 0) {
fprintf(stderr,
"InnoDB: Error: Mem area size is 0. Possibly a"
" memory overrun of the\n"
"InnoDB: previous allocated area!\n");
mem_analyze_corruption(area);
ut_error;
}
#ifdef UNIV_LIGHT_MEM_DEBUG
if (((byte*)area) + size < pool->buf + pool->size) {
ulint next_size;
next_size = mem_area_get_size(
(mem_area_t*)(((byte*)area) + size));
if (UNIV_UNLIKELY(!next_size || !ut_is_2pow(next_size))) {
fprintf(stderr,
"InnoDB: Error: Memory area size %lu,"
" next area size %lu not a power of 2!\n"
"InnoDB: Possibly a memory overrun of"
" the buffer being freed here.\n",
(ulong) size, (ulong) next_size);
mem_analyze_corruption(area);
ut_error;
}
}
#endif
buddy = mem_area_get_buddy(area, size, pool);
n = ut_2_log(size);
mem_pool_mutex_enter(pool);
mem_n_threads_inside++;
ut_a(mem_n_threads_inside == 1);
if (buddy && mem_area_get_free(buddy)
&& (size == mem_area_get_size(buddy))) {
/* The buddy is in a free list */
if ((byte*)buddy < (byte*)area) {
new_ptr = ((byte*)buddy) + MEM_AREA_EXTRA_SIZE;
mem_area_set_size(buddy, 2 * size);
mem_area_set_free(buddy, FALSE);
} else {
new_ptr = ptr;
mem_area_set_size(area, 2 * size);
}
//......... rest of the code omitted .........