
Binder Driver 5: binder_buffer Management

Author: ColdWave | Published 2018-06-28 14:23

Managing the kernel buffer

Allocating and freeing physical memory: binder_update_page_range

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                    void *start, void *end,
                    struct vm_area_struct *vma)  // vma describes the user-space address range to map
{
    void *page_addr;
    unsigned long user_page_addr;
    struct page **page;
    struct mm_struct *mm;

    /*
     * struct vm_struct      : describes a kernel-space virtual address range (the mapped pages may come from high memory)
     * struct vm_area_struct : describes a user-space virtual address range
     */

    if (vma)
        mm = NULL;
    else
        mm = get_task_mm(alloc->tsk);

    if (mm) {
        down_write(&mm->mmap_sem);
        vma = alloc->vma;
    }

    if (allocate == 0)
        goto free_range;

    for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
        page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
        *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);    // actually allocate one physical page
        map_kernel_range_noflush((unsigned long)page_addr,              // map the page at the kernel-space virtual address
                    PAGE_SIZE, PAGE_KERNEL, page);
        flush_cache_vmap((unsigned long)page_addr,                      // flush the cache for the new kernel mapping
                (unsigned long)page_addr + PAGE_SIZE);
        user_page_addr =
            (uintptr_t)page_addr + alloc->user_buffer_offset;
        vm_insert_page(vma, user_page_addr, page[0]);                  // map the same page at the user-space virtual address
    }
    if (mm) {
        up_write(&mm->mmap_sem);
        mmput(mm);
    }
    return 0;

free_range:
    for (page_addr = end - PAGE_SIZE; page_addr >= start;
         page_addr -= PAGE_SIZE) {
        page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
        if (vma)
            zap_page_range(vma, (uintptr_t)page_addr +
                alloc->user_buffer_offset, PAGE_SIZE, NULL);     // remove the page's user-space mapping
err_vm_insert_page_failed:
        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); // remove the page's kernel-space mapping
err_map_kernel_failed:
        __free_page(*page);                                      // free the physical page
        *page = NULL;
err_alloc_page_failed:
        ;
    }
err_no_vma:
    if (mm) {
        up_write(&mm->mmap_sem);
        mmput(mm);
    }
    return vma ? -ENOMEM : -ESRCH;
}
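The allocate loop above walks the requested range one page at a time and finds each page's slot in alloc->pages via (page_addr - alloc->buffer) / PAGE_SIZE. Below is a minimal user-space sketch of that per-page walk and index arithmetic; PAGE_SIZE and the addresses are stand-ins, not the driver's real values.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    /* Hypothetical kernel buffer base and a sub-range to "map". */
    uintptr_t buffer = 0x10000000UL;            /* stand-in for alloc->buffer */
    uintptr_t start  = buffer + 1 * PAGE_SIZE;  /* stand-in for the 'start' argument */
    uintptr_t end    = buffer + 4 * PAGE_SIZE;  /* stand-in for the 'end' argument */

    /* Same per-page walk as binder_update_page_range's allocate loop. */
    for (uintptr_t page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
        size_t index = (page_addr - buffer) / PAGE_SIZE;  /* index into alloc->pages[] */
        printf("page_addr = 0x%lx -> pages[%zu]\n", (unsigned long)page_addr, index);
    }
    return 0;
}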

Binder memory allocation

  • binder_alloc_new_buf
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                       size_t data_size,           // size of user data buffer
                       size_t offsets_size,        // user specified buffer offset
                       size_t extra_buffers_size,  // size of extra space for meta-data (eg, security context)
                       int is_async)               // buffer for async transaction
{
    struct binder_buffer *buffer;

    mutex_lock(&alloc->mutex);
    buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                         extra_buffers_size, is_async);
    mutex_unlock(&alloc->mutex);
    return buffer;
}
  • binder_alloc_new_buf_locked
/*
    When userspace interacts with the driver via BC_TRANSACTION or BC_REPLY, it passes a
    binder_transaction_data down. That structure carries a data buffer and an offsets buffer,
    and both must be copied into a kernel buffer belonging to the target process.

    Note: this is the single copy. The data goes directly into the target process's kernel buffer.
*/
struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                          size_t data_size,
                          size_t offsets_size,
                          size_t extra_buffers_size,
                          int is_async)
{
    struct rb_node *n = alloc->free_buffers.rb_node;  // search free_buffers for a suitable buffer
    struct binder_buffer *buffer;
    size_t buffer_size;
    struct rb_node *best_fit = NULL;
    void *has_page_addr;
    void *end_page_addr;
    size_t size, data_offsets_size;
    int ret;

    data_offsets_size = ALIGN(data_size, sizeof(void *)) +
        ALIGN(offsets_size, sizeof(void *));
    // Align data_size and offsets_size to sizeof(void *) and add them: the space needed for the payload.

    size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); // plus the aligned extra-buffer size

    if (is_async &&
        alloc->free_async_space < size + sizeof(struct binder_buffer)) {
        // Async transaction, but less async space left than the request: fail.
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                 "%d: binder_alloc_buf size %zd failed, no async space left\n",
                  alloc->pid, size);
        return ERR_PTR(-ENOSPC);
    }

    /* Pad 0-size buffers so they get assigned unique addresses */
    size = max(size, sizeof(void *));  // size must be at least sizeof(void *)

    // Walk the free_buffers red-black tree looking for a binder_buffer of suitable size.
    while (n) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        BUG_ON(!buffer->free);
        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        if (size < buffer_size) {
            best_fit = n;
            n = n->rb_left;
        } else if (size > buffer_size)
            n = n->rb_right;
        else {
            best_fit = n;
            break;
        }
    }
    // No suitable binder_buffer was found: report and fail.
    if (best_fit == NULL) {
        size_t allocated_buffers = 0;
        size_t largest_alloc_size = 0;
        size_t total_alloc_size = 0;
        size_t free_buffers = 0;
        size_t largest_free_size = 0;
        size_t total_free_size = 0;

        // Walk allocated_buffers to compute the total allocated size and the largest allocated buffer.
        for (n = rb_first(&alloc->allocated_buffers); n != NULL;
             n = rb_next(n)) {
            buffer = rb_entry(n, struct binder_buffer, rb_node);
            buffer_size = binder_alloc_buffer_size(alloc, buffer);
            allocated_buffers++;
            total_alloc_size += buffer_size;
            if (buffer_size > largest_alloc_size)
                largest_alloc_size = buffer_size;
        }
        // Walk free_buffers to compute the total free size and the largest free buffer.
        for (n = rb_first(&alloc->free_buffers); n != NULL;
             n = rb_next(n)) {
            buffer = rb_entry(n, struct binder_buffer, rb_node);
            buffer_size = binder_alloc_buffer_size(alloc, buffer);
            free_buffers++;
            total_free_size += buffer_size;
            if (buffer_size > largest_free_size)
                largest_free_size = buffer_size;
        }
        // This log output is especially useful when debugging binder buffer exhaustion.
        pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
            alloc->pid, size);
        pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
               total_alloc_size, allocated_buffers, largest_alloc_size,
               total_free_size, free_buffers, largest_free_size);
        return ERR_PTR(-ENOSPC);
    }
    // No exact-size match: best_fit is the smallest free buffer that is larger than the request.
    if (n == NULL) {
        buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
        buffer_size = binder_alloc_buffer_size(alloc, buffer);
        // buffer is the chosen (slightly larger) free buffer
        // buffer_size is the size of that free buffer
    }
    /*
        Since no exact match exists, a slightly larger free buffer is used and split:
        the front part becomes the allocation, and the leftover tail is described by a
        new binder_buffer and put back into the free-buffer red-black tree for later use.
    */

    // has_page_addr: start of the page that contains this free buffer's end address.
    has_page_addr =
        (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
    WARN_ON(n && buffer_size != size);
    // end_page_addr: the requested end (buffer->data + size) rounded up to the next page boundary.
    end_page_addr =
        (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);

    if (end_page_addr > has_page_addr)  // never map beyond the page holding this free buffer's end
        end_page_addr = has_page_addr;
    
    // Allocate physical pages and map them for the chosen range.
    binder_update_page_range(alloc, 1,
        (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);  

    // If the free buffer is larger than the request, split it and insert the tail
    // back into the free-buffer red-black tree.
    if (buffer_size != size) {
        struct binder_buffer *new_buffer;
        new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        new_buffer->data = (u8 *)buffer->data + size;
        list_add(&new_buffer->entry, &buffer->entry);
        /*
         * This list_add() is the key to understanding the alloc->buffers list:
         * the list is ordered by buffer->data address.
         * alloc->buffers starts out as one large free buffer; on the first allocation
         * it is split in two, with the front and tail each described by their own
         * binder_buffer and linked in address order.
         */
        new_buffer->free = 1;
        binder_insert_free_buffer(alloc, new_buffer);
    }
    }

    // Remove the chosen buffer from the free_buffers red-black tree.
    rb_erase(best_fit, &alloc->free_buffers);

    // Initialize the newly allocated buffer.
    buffer->free = 0;
    buffer->free_in_progress = 0;
    // Insert it into the allocated_buffers red-black tree.
    binder_insert_allocated_buffer_locked(alloc, buffer);
    buffer->data_size = data_size;
    buffer->offsets_size = offsets_size;
    buffer->async_transaction = is_async;
    buffer->extra_buffers_size = extra_buffers_size;
    // For an async transaction, charge the allocation against the async free space.
    if (is_async) {
        alloc->free_async_space -= size + sizeof(struct binder_buffer);
    }
    return buffer;
}
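To see how the request size is computed before the free-tree search, here is a small user-space sketch of the ALIGN arithmetic used at the top of binder_alloc_new_buf_locked; the ALIGN macro below is a stand-in for the kernel's, and the sizes are made-up example values.

#include <stdio.h>

/* User-space stand-in for the kernel's ALIGN() macro (power-of-two alignment). */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
    size_t data_size = 13, offsets_size = 6, extra_buffers_size = 0;  /* example values */

    size_t data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                               ALIGN(offsets_size, sizeof(void *));
    size_t size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));

    /* 0-size requests are padded so every buffer gets a unique address. */
    if (size < sizeof(void *))
        size = sizeof(void *);

    printf("requested %zu + %zu + %zu bytes -> allocation size %zu\n",
           data_size, offsets_size, extra_buffers_size, size);
    return 0;
}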


Binder memory release

  • binder_alloc_free_buf
void binder_alloc_free_buf(struct binder_alloc *alloc,
                struct binder_buffer *buffer)
{
    mutex_lock(&alloc->mutex);
    binder_free_buf_locked(alloc, buffer);
    mutex_unlock(&alloc->mutex);
}
  • binder_free_buf_locked
// Start address of the page containing buffer->data.
static void *buffer_start_page(struct binder_buffer *buffer)
{
    return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

// Start address of the page containing (buffer->data - 1), i.e. the byte just before
// this buffer's data (the last byte used by the previous buffer).
static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
    return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK); // -1 steps back a single byte, not a word
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
                      struct binder_buffer *buffer)
{
    struct binder_buffer *prev, *next = NULL;
    bool to_free = true;
    BUG_ON(alloc->buffers.next == &buffer->entry);
    prev = binder_buffer_prev(buffer);  // the previous binder_buffer in the binder_alloc->buffers list
    BUG_ON(!prev->free);
    
    // If this buffer shares a physical page with the previous buffer, keep that page mapped.
    if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
        to_free = false;
    }

    if (!list_is_last(&buffer->entry, &alloc->buffers)) {
        /*
         * This buffer is not the last one in alloc->buffers, so check its successor.
         */
        next = binder_buffer_next(buffer);
        // If this buffer shares a physical page with the next buffer, keep that page mapped.
        if (buffer_start_page(next) == buffer_start_page(buffer)) {
            to_free = false;
        }
    }

    if (PAGE_ALIGNED(buffer->data)) {
        to_free = false;
    }

    if (to_free) {
        /*
         * The range buffer_start_page(buffer) ~ buffer_start_page(buffer) + PAGE_SIZE
         * covers the whole physical page this buffer starts on, so release that page.
         */
        binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                     buffer_start_page(buffer) + PAGE_SIZE,
                     NULL);
    }
    list_del(&buffer->entry);
    kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
                   struct binder_buffer *buffer)
{
    size_t size, buffer_size;

    // Compute the size of the binder_buffer being freed; free buffers are keyed by size in the red-black tree.
    buffer_size = binder_alloc_buffer_size(alloc, buffer);

    size = ALIGN(buffer->data_size, sizeof(void *)) +
        ALIGN(buffer->offsets_size, sizeof(void *)) +
        ALIGN(buffer->extra_buffers_size, sizeof(void *));

    if (buffer->async_transaction) {
        alloc->free_async_space += size + sizeof(struct binder_buffer);
    }

    binder_update_page_range(alloc, 0,
        (void *)PAGE_ALIGN((uintptr_t)buffer->data),
        (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
        NULL);

    // Remove this binder_buffer from the allocated_buffers red-black tree.
    rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
    buffer->free = 1;
    if (!list_is_last(&buffer->entry, &alloc->buffers)) {
        /*
         * If this buffer is not the last one in alloc->buffers, look at its successor;
         * if that buffer is also free, merge the two.
         */
        struct binder_buffer *next = binder_buffer_next(buffer);
        if (next->free) {
            rb_erase(&next->rb_node, &alloc->free_buffers); // remove next from the free_buffers tree
            binder_delete_free_buffer(alloc, next);
        }
    }
    // The previous buffer is handled the same way: if it is free, this buffer is merged into it.
    if (alloc->buffers.next != &buffer->entry) {
        struct binder_buffer *prev = binder_buffer_prev(buffer);
        if (prev->free) {
            binder_delete_free_buffer(alloc, buffer);
            rb_erase(&prev->rb_node, &alloc->free_buffers);
            buffer = prev;
        }
    }
    // Insert the buffer into the free_buffers red-black tree (it may have been merged with a free neighbour).
    binder_insert_free_buffer(alloc, buffer);
}
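binder_delete_free_buffer only hands a page back to binder_update_page_range when no neighbouring buffer still uses that page and the buffer does not start exactly on a page boundary. Below is a minimal user-space sketch of the page arithmetic behind those checks (the next-buffer check works the same way); PAGE_SIZE and the address are stand-ins.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Start of the page containing an address (cf. buffer_start_page). */
static uintptr_t page_start(uintptr_t addr)
{
    return addr & PAGE_MASK;
}

int main(void)
{
    /* Hypothetical buffer whose data starts mid-page; the previous buffer's
     * last byte is the byte just before it (cf. prev_buffer_end_page). */
    uintptr_t cur_data = 0x10000800UL;

    bool shares_page_with_prev = page_start(cur_data - 1) == page_start(cur_data);
    bool starts_on_page_boundary = (cur_data & (PAGE_SIZE - 1)) == 0;

    /* Only a page used by nobody else may be unmapped and freed. */
    printf("shares page with prev: %d, page aligned: %d -> page can be freed: %d\n",
           shares_page_with_prev, starts_on_page_boundary,
           !shares_page_with_prev && !starts_on_page_boundary);
    return 0;
}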

Binder memory lookup

  • binder_alloc_prepare_to_free
/*
 * Look up the binder_buffer that corresponds to a given user-space pointer.
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                           uintptr_t user_ptr)
{
    struct binder_buffer *buffer;

    mutex_lock(&alloc->mutex);
    buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
    mutex_unlock(&alloc->mutex);
    return buffer;
}
  • binder_alloc_prepare_to_free_locked
/*
 * The core is a walk of the allocated_buffers red-black tree, keyed by buffer->data.
 */
static struct binder_buffer *binder_alloc_prepare_to_free_locked(
        struct binder_alloc *alloc,
        uintptr_t user_ptr)
{
    struct rb_node *n = alloc->allocated_buffers.rb_node;
    struct binder_buffer *buffer;
    void *kern_ptr;

    kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

    while (n) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);
        BUG_ON(buffer->free);

        if (kern_ptr < buffer->data)
            n = n->rb_left;
        else if (kern_ptr > buffer->data)
            n = n->rb_right;
        else {
            /*
             * Guard against user threads attempting to
             * free the buffer twice
             */
            if (buffer->free_in_progress) {
                pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
                       alloc->pid, current->pid, (u64)user_ptr);
                return NULL;
            }
            buffer->free_in_progress = 1;
            return buffer;
        }
    }
    return NULL;
}
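The translation works because the user-space address that the client hands back (e.g. with BC_FREE_BUFFER) and the kernel address of the same buffer differ by the constant alloc->user_buffer_offset, so subtracting that offset recovers buffer->data, the key of the allocated_buffers tree. A minimal sketch of the pointer translation, using made-up addresses:

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
    /* Hypothetical addresses: the same buffer is visible at a kernel address
     * and at a user address that differ by a fixed offset. */
    uint64_t kernel_base = 0xffffff8010000000ULL;   /* stand-in for alloc->buffer */
    uint64_t user_base   = 0x0000007f80000000ULL;   /* stand-in for the mmap'ed user address */
    uint64_t user_buffer_offset = user_base - kernel_base;  /* stand-in for alloc->user_buffer_offset */

    uint64_t user_ptr = user_base + 0x200;               /* pointer handed back by userspace */
    uint64_t kern_ptr = user_ptr - user_buffer_offset;   /* same translation as the lookup above */

    printf("user_ptr 0x%" PRIx64 " -> kern_ptr 0x%" PRIx64 "\n", user_ptr, kern_ptr);
    return 0;
}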

Releasing the binder allocator

  • binder_alloc_deferred_release
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
    struct rb_node *n;
    int buffers, page_count;
    struct binder_buffer *buffer;

    BUG_ON(alloc->vma);

    buffers = 0;
    mutex_lock(&alloc->mutex);
    while ((n = rb_first(&alloc->allocated_buffers))) {
        buffer = rb_entry(n, struct binder_buffer, rb_node);

        /* Transaction should already have been freed */
        BUG_ON(buffer->transaction);

        binder_free_buf_locked(alloc, buffer);
        buffers++;
    }

    while (!list_empty(&alloc->buffers)) {
        buffer = list_first_entry(&alloc->buffers,
                      struct binder_buffer, entry);
        WARN_ON(!buffer->free);

        list_del(&buffer->entry);
        WARN_ON_ONCE(!list_empty(&alloc->buffers));
        kfree(buffer);
    }

    page_count = 0;
    if (alloc->pages) {
        int i;

        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
            void *page_addr;

            if (!alloc->pages[i])
                continue;

            page_addr = alloc->buffer + i * PAGE_SIZE;
            binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%s: %d: page %d at %pK not freed\n",
                     __func__, alloc->pid, i, page_addr);
            unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
            __free_page(alloc->pages[i]);
            page_count++;
        }
        kfree(alloc->pages);
        vfree(alloc->buffer);
    }
    mutex_unlock(&alloc->mutex);

    binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
             "%s: %d buffers %d, pages %d\n",
             __func__, alloc->pid, buffers, page_count);
}
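The final page sweep above simply walks alloc->pages and unmaps and frees every page that is still present. A user-space sketch of the same cleanup pattern, with malloc/free standing in for alloc_page()/__free_page() and a plain pointer array standing in for alloc->pages:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    /* Hypothetical page table: some slots still allocated, some not. */
    size_t buffer_size = 4 * PAGE_SIZE;
    size_t npages = buffer_size / PAGE_SIZE;
    void **pages = calloc(npages, sizeof(*pages));

    pages[1] = malloc(PAGE_SIZE);  /* pretend pages 1 and 3 were never freed */
    pages[3] = malloc(PAGE_SIZE);

    int page_count = 0;
    for (size_t i = 0; i < npages; i++) {
        if (!pages[i])
            continue;
        /* In the driver this step is unmap_kernel_range() followed by __free_page(). */
        free(pages[i]);
        pages[i] = NULL;
        page_count++;
    }
    free(pages);
    printf("freed %d leftover pages\n", page_count);
    return 0;
}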
