Function buf_LRU_free_block

Time: 2021-06-17 15:27:31
/******************************************************************//**
Try to free a block.  If bpage is a descriptor of a compressed-only
page, the descriptor object will be freed as well.

NOTE: If this function returns TRUE, it will temporarily
release buf_pool->mutex.  Furthermore, the page frame will no longer be
accessible via bpage.

The caller must hold buf_pool->mutex and buf_page_get_mutex(bpage) and
release these two mutexes after the call.  No other
buf_page_get_mutex() may be held when calling this function.
@return TRUE if freed, FALSE otherwise. */
UNIV_INTERN
ibool
buf_LRU_free_block(
/*===============*/
    buf_page_t*    bpage,    /*!< in: block to be freed */
    ibool        zip)    /*!< in: TRUE if should remove also the
                compressed page of an uncompressed page */
{
    buf_page_t*    b = NULL;
    buf_pool_t*    buf_pool = buf_pool_from_bpage(bpage);
    mutex_t*    block_mutex = buf_page_get_mutex(bpage);

    ut_ad(buf_pool_mutex_own(buf_pool));
    ut_ad(mutex_own(block_mutex));
    ut_ad(buf_page_in_file(bpage));
    ut_ad(bpage->in_LRU_list);
    ut_ad(!bpage->in_flush_list == !bpage->oldest_modification);
#if UNIV_WORD_SIZE == 4
    /* On 32-bit systems, there is no padding in buf_page_t.  On
    other systems, Valgrind could complain about uninitialized pad
    bytes. */
    UNIV_MEM_ASSERT_RW(bpage, sizeof *bpage);
#endif

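    /* buf_page_can_relocate() succeeds only when the block has no
    pending I/O (io_fix == BUF_IO_NONE) and no buffer-fixes
    (buf_fix_count == 0); anything else may still be read, written
    or referenced by another thread. */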
    if (!buf_page_can_relocate(bpage)) {

        /* Do not free buffer-fixed or I/O-fixed blocks. */
        return(FALSE);
    }

#ifdef UNIV_IBUF_COUNT_DEBUG
    ut_a(ibuf_count_get(bpage->space, bpage->offset) == 0);
#endif /* UNIV_IBUF_COUNT_DEBUG */

    if (zip || !bpage->zip.data) {
        /* This would completely free the block. */
        /* Do not completely free dirty blocks. */

        if (bpage->oldest_modification) {
            return(FALSE);
        }
    } else if (bpage->oldest_modification) {
        /* Do not completely free dirty blocks. */

        if (buf_page_get_state(bpage) != BUF_BLOCK_FILE_PAGE) {
            ut_ad(buf_page_get_state(bpage)
                  == BUF_BLOCK_ZIP_DIRTY);
            return(FALSE);
        }

        goto alloc;
    } else if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
        /* Allocate the control block for the compressed page.
        If it cannot be allocated (without freeing a block
        from the LRU list), refuse to free bpage. */
alloc:
        b = buf_page_alloc_descriptor();
        ut_a(b);
        memcpy(b, bpage, sizeof *b);
    }
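
    /* From here on, b != NULL means "free the uncompressed frame but
    keep the compressed copy", with b serving as the spare descriptor
    that will represent the compressed page.  b == NULL means the
    block is to be freed completely, which is only reached for clean
    pages. */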

#ifdef UNIV_DEBUG
    if (buf_debug_prints) {
        fprintf(stderr, "Putting space %lu page %lu to free list\n",
            (ulong) buf_page_get_space(bpage),
            (ulong) buf_page_get_page_no(bpage));
    }
#endif /* UNIV_DEBUG */

    if (buf_LRU_block_remove_hashed_page(bpage, zip) != BUF_BLOCK_ZIP_FREE) {  // here
        ut_a(bpage->buf_fix_count == 0);

        if (b) {
            buf_page_t*    hash_b;
            buf_page_t*    prev_b    = UT_LIST_GET_PREV(LRU, b);

            const ulint    fold = buf_page_address_fold(
                bpage->space, bpage->offset);

            hash_b    = buf_page_hash_get_low(
                buf_pool, bpage->space, bpage->offset, fold);

            ut_a(!hash_b);
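
            /* The lookup above must fail: bpage was just removed
            from buf_pool->page_hash by
            buf_LRU_block_remove_hashed_page().  b is inserted under
            the same fold below to take its place. */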

            b->state = b->oldest_modification
                ? BUF_BLOCK_ZIP_DIRTY
                : BUF_BLOCK_ZIP_PAGE;
            UNIV_MEM_DESC(b->zip.data,
                      page_zip_get_size(&b->zip), b);

            /* The fields in_page_hash and in_LRU_list of
            the to-be-freed block descriptor should have
            been cleared in
            buf_LRU_block_remove_hashed_page(), which
            invokes buf_LRU_remove_block(). */
            ut_ad(!bpage->in_page_hash);
            ut_ad(!bpage->in_LRU_list);
            /* bpage->state was BUF_BLOCK_FILE_PAGE because
            b != NULL. The type cast below is thus valid. */
            ut_ad(!((buf_block_t*) bpage)->in_unzip_LRU_list);

            /* The fields of bpage were copied to b before
            buf_LRU_block_remove_hashed_page() was invoked. */
            ut_ad(!b->in_zip_hash);
            ut_ad(b->in_page_hash);
            ut_ad(b->in_LRU_list);

            HASH_INSERT(buf_page_t, hash,
                    buf_pool->page_hash, fold, b);

            /* Insert b where bpage was in the LRU list. */
            if (UNIV_LIKELY(prev_b != NULL)) {
                ulint    lru_len;

                ut_ad(prev_b->in_LRU_list);
                ut_ad(buf_page_in_file(prev_b));
#if UNIV_WORD_SIZE == 4
                /* On 32-bit systems, there is no
                padding in buf_page_t.  On other
                systems, Valgrind could complain about
                uninitialized pad bytes. */
                UNIV_MEM_ASSERT_RW(prev_b, sizeof *prev_b);
#endif
                UT_LIST_INSERT_AFTER(LRU, buf_pool->LRU,
                             prev_b, b);

                incr_LRU_size_in_bytes(b, buf_pool);

                if (buf_page_is_old(b)) {
                    buf_pool->LRU_old_len++;
                    if (UNIV_UNLIKELY
                        (buf_pool->LRU_old
                         == UT_LIST_GET_NEXT(LRU, b))) {

                        buf_pool->LRU_old = b;
                    }
                }

                lru_len = UT_LIST_GET_LEN(buf_pool->LRU);

                if (lru_len > BUF_LRU_OLD_MIN_LEN) {
                    ut_ad(buf_pool->LRU_old);
                    /* Adjust the length of the
                    old block list if necessary */
                    buf_LRU_old_adjust_len(buf_pool);
                } else if (lru_len == BUF_LRU_OLD_MIN_LEN) {
                    /* The LRU list is now long
                    enough for LRU_old to become
                    defined: init it */
                    buf_LRU_old_init(buf_pool);
                }
#ifdef UNIV_LRU_DEBUG
                /* Check that the "old" flag is consistent
                in the block and its neighbours. */
                buf_page_set_old(b, buf_page_is_old(b));
#endif /* UNIV_LRU_DEBUG */
            } else {
                ut_d(b->in_LRU_list = FALSE);
                buf_LRU_add_block_low(b, buf_page_is_old(b));
            }
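
            /* b has inherited the exact LRU position and "old" flag
            of the evicted frame, so the compressed copy keeps the
            replacement-order age of the original page. */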

            if (b->state == BUF_BLOCK_ZIP_PAGE) {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
                buf_LRU_insert_zip_clean(b);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
            } else {
                /* Relocate on buf_pool->flush_list. */
                buf_flush_relocate_on_flush_list(bpage, b);
            }

            bpage->zip.data = NULL;
        page_zip_set_size(&bpage->zip, 0);

            /* Prevent buf_page_get_gen() from
            decompressing the block while we release
            buf_pool->mutex and block_mutex. */
            mutex_enter(&buf_pool->zip_mutex);
            buf_page_set_sticky(b);
            mutex_exit(&buf_pool->zip_mutex);
        }

        buf_pool_mutex_exit(buf_pool);
        mutex_exit(block_mutex);
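
        /* Both mutexes are dropped before the adaptive hash index
        work below: btr_search_drop_page_hash_index() may acquire
        btr_search_latch, which must not be requested while holding
        buf_pool->mutex. */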

        /* Remove possible adaptive hash index on the page.
        The page was declared uninitialized by
        buf_LRU_block_remove_hashed_page().  We need to flag
        the contents of the page valid (which it still is) in
        order to avoid bogus Valgrind warnings.*/

        UNIV_MEM_VALID(((buf_block_t*) bpage)->frame,
                   UNIV_PAGE_SIZE);
        btr_search_drop_page_hash_index((buf_block_t*) bpage);
        UNIV_MEM_INVALID(((buf_block_t*) bpage)->frame,
                 UNIV_PAGE_SIZE);

        if (b) {
            /* Compute and stamp the compressed page
            checksum while not holding any mutex.  The
            block is already half-freed
            (BUF_BLOCK_REMOVE_HASH) and removed from
            buf_pool->page_hash, thus inaccessible by any
            other thread. */

            mach_write_to_4(
                b->zip.data + FIL_PAGE_SPACE_OR_CHKSUM,
                UNIV_LIKELY(srv_use_checksums)
                ? page_zip_calc_checksum(
                    b->zip.data,
                    page_zip_get_size(&b->zip))
                : BUF_NO_CHECKSUM_MAGIC);
        }

        buf_pool_mutex_enter(buf_pool);
        mutex_enter(block_mutex);

        if (b) {
            mutex_enter(&buf_pool->zip_mutex);
            buf_page_unset_sticky(b);
            mutex_exit(&buf_pool->zip_mutex);
        }

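        /* Return the now-unused uncompressed frame to the buffer
        pool free list (state BUF_BLOCK_MEMORY, then
        BUF_BLOCK_NOT_USED). */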
        buf_LRU_block_free_hashed_page((buf_block_t*) bpage);
    } else {
        /* The block_mutex should have been released by
        buf_LRU_block_remove_hashed_page() when it returns
        BUF_BLOCK_ZIP_FREE. */
        ut_ad(block_mutex == &buf_pool->zip_mutex);
        mutex_enter(block_mutex);
    }

    return(TRUE);
}
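
For reference, a minimal sketch of the calling convention spelled out in the header comment, loosely modeled on the LRU eviction scan in this source tree.  The wrapper name try_evict_one is made up for illustration; the mutex calls and buf_LRU_free_block() itself are the real ones.

/* Sketch only: the caller-side locking contract of
buf_LRU_free_block().  try_evict_one() is a hypothetical helper,
not part of the source tree. */
static
ibool
try_evict_one(
    buf_pool_t*    buf_pool,    /*!< in: buffer pool instance */
    buf_page_t*    bpage)       /*!< in: candidate page on the LRU */
{
    mutex_t*    block_mutex = buf_page_get_mutex(bpage);
    ibool       freed;

    buf_pool_mutex_enter(buf_pool);    /* rule 1: hold buf_pool->mutex */
    mutex_enter(block_mutex);          /* rule 2: hold the block mutex;
                                       no other block mutex may be held */

    /* buf_LRU_free_block() may release and re-acquire both mutexes
    internally; if it returns TRUE, the frame is no longer reachable
    through bpage. */
    freed = buf_LRU_free_block(bpage, TRUE);

    /* Per the header comment, the caller releases both mutexes. */
    mutex_exit(block_mutex);
    buf_pool_mutex_exit(buf_pool);

    return(freed);
}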