static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	/* vma->vm_start and vma->vm_end delimit the user-space range being
	 * mapped; the mapping is capped at 4 MB. */
	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	/* User space must not write to the mmap'ed buffer: the mapping only
	 * provides the receive area, data is passed through ioctl(). */
	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}

	/* Set VM_DONTCOPY and clear VM_MAYWRITE: the mapping is neither
	 * inherited across fork() nor upgradable to writable. */
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	/* Finish setting up the binder buffer allocator for this process. */
	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	return ret;

err_bad_arg:
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
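For context, this is what the mapping looks like from user space: a minimal sketch (not libbinder itself; the 1 MB size is illustrative) that opens /dev/binder and maps it read-only, the way ProcessState does. Mapping with PROT_WRITE would trip the FORBIDDEN_MMAP_FLAGS check above and fail with -EPERM.

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return 1;

	/* Read-only, private mapping: this becomes the receive area for
	 * incoming transactions. Data is sent via ioctl(), never by
	 * writing to this region. */
	void *base = mmap(NULL, 1 * 1024 * 1024, PROT_READ,
			  MAP_PRIVATE | MAP_NORESERVE, fd, 0);
	if (base == MAP_FAILED) {
		close(fd);
		return 1;
	}

	/* ... drive transactions via ioctl(fd, BINDER_WRITE_READ, ...) ... */

	munmap(base, 1 * 1024 * 1024);
	close(fd);
	return 0;
}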
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {	/* this process has already mmap'ed */
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	/*
	 * struct vm_struct describes a kernel-space virtual address range
	 * (the vmalloc/high-memory area); struct vm_area_struct describes
	 * a user-space virtual address range.
	 */
	/* Reserve a kernel virtual address range of the same size; no
	 * physical pages are backing it yet. (Allocation-failure paths
	 * are omitted here for brevity.) */
	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	alloc->buffer = area->addr;	/* start of the reserved kernel range */
	/* Fixed offset between the user-space and kernel-space views of
	 * the same buffer. */
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

	/*
	 * The mapping covers vma->vm_end - vma->vm_start bytes, i.e.
	 * (vma->vm_end - vma->vm_start) / PAGE_SIZE pages, each described
	 * by a struct page *; with 4 KB pages, a 1 MB mapping needs 256
	 * entries.
	 */
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);	/* first binder_buffer */
	buffer->data = alloc->buffer;	/* spans the whole mmap'ed kernel range */
	list_add(&buffer->entry, &alloc->buffers);	/* list of all buffers for this proc */
	buffer->free = 1;
	/* Insert into the alloc->free_buffers rb-tree: the whole region
	 * starts out as a single free buffer. */
	binder_insert_free_buffer(alloc, buffer);
	/* Reserve half of the space for asynchronous transactions. */
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;
	return 0;

err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
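Because get_vm_area() only reserves kernel virtual addresses and the same physical pages will later be mapped at both vma->vm_start and alloc->buffer, the two views of any buffer differ by the constant user_buffer_offset. A minimal sketch of that translation (these helper names are illustrative, not from the driver):

/* Illustrative helpers, not part of the driver: the same physical pages
 * are visible at vma->vm_start in user space and alloc->buffer in kernel
 * space, so the two addresses differ by a constant. */
static inline uintptr_t binder_uaddr(struct binder_alloc *alloc, void *kaddr)
{
	return (uintptr_t)kaddr + alloc->user_buffer_offset;
}

static inline void *binder_kaddr(struct binder_alloc *alloc, uintptr_t uaddr)
{
	return (void *)(uaddr - alloc->user_buffer_offset);
}

This is the basis of binder's one-copy design: the driver copies transaction data once into these pages on the kernel side, and the receiver reads it directly at the translated user-space address.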
static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}
/*
 * binder_alloc is the per-process buffer allocator; alloc->buffers is the
 * address-ordered list of all binder_buffers carved out of the region.
 * A buffer's size is implicit: the distance from its data pointer to the
 * next buffer's data pointer, or to the end of the region for the last
 * buffer.
 */
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return (u8 *)alloc->buffer +
			alloc->buffer_size - (u8 *)buffer->data;
	return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
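A standalone user-space sketch (simplified structures, made-up offsets) of the implicit-size scheme: because the buffers partition one contiguous region in address order, each buffer's size falls out of its neighbour's data pointer.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Assume a 4 KB region carved into three buffers whose data
	 * pointers sit at offsets 0x0, 0x100, 0x300. */
	uint8_t region[0x1000];
	uint8_t *data[3] = { region + 0x0, region + 0x100, region + 0x300 };
	size_t region_size = sizeof(region);

	for (int i = 0; i < 3; i++) {
		size_t size;
		if (i == 2)	/* last buffer: runs to the end of the region */
			size = (size_t)((region + region_size) - data[i]);
		else		/* otherwise: gap to the next buffer's data */
			size = (size_t)(data[i + 1] - data[i]);
		printf("buffer %d: size 0x%zx\n", i, size);
	}
	return 0;	/* prints 0x100, 0x200, 0xd00 */
}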
static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	/* Compute the size of the buffer being inserted. */
	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	/* Standard rb-tree insertion, keyed by buffer size: smaller
	 * buffers to the left, larger (or equal) to the right. */
	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}
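The size-keyed tree exists so allocation can do a best-fit search. A minimal sketch of that lookup (modeled on the kernel's binder_alloc_new_buf_locked, with locking and buffer splitting elided):

/* Sketch only: descend the size-ordered rb-tree, remembering the
 * smallest free buffer that is still large enough for the request. */
static struct binder_buffer *
binder_find_best_fit(struct binder_alloc *alloc, size_t size)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *best_fit = NULL;
	size_t buffer_size;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = buffer;	/* big enough; try smaller */
			n = n->rb_left;
		} else if (size > buffer_size) {
			n = n->rb_right;	/* too small; go bigger */
		} else {
			return buffer;		/* exact fit */
		}
	}
	return best_fit;	/* NULL if nothing is large enough */
}

Always descending left when the current buffer fits yields the smallest adequate buffer, which limits fragmentation; the real allocator then splits any excess tail off the chosen buffer and reinserts it into free_buffers.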