Binder Driver, Part 3
ioctl
// res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
int ioctl(int fd, unsigned long cmd, ... /* args */);
- The first argument, fd, is the device's file descriptor.
- The second argument, cmd, is the command code. It is defined by the driver; user space uses it to tell the device driver what it wants done.
- The remaining arguments are optional; their meaning depends on cmd, and they are passed down to the driver layer.
Command codes
The Linux kernel defines a command code as a single integer built out of several bit fields, so each command becomes one integer command code. To make these codes easy to construct, Linux provides a set of macros for generating them:
_IO(type,nr) // a command that carries no data
_IOR(type,nr,size) // read data from the driver
_IOW(type,nr,size) // write data to the driver
_IOWR(type,nr,size) // bidirectional read and write
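Under the hood these macros all expand to _IOC, which packs four fields (direction, size, a "magic" type character, and a sequence number) into one integer. A simplified sketch of the generic definition in include/uapi/asm-generic/ioctl.h follows; the shift values shown are the common defaults, and a few architectures use different layouts:

/* nr: 8 bits, type: 8 bits, size: 14 bits, dir: 2 bits (common defaults) */
#define _IOC_NRSHIFT    0
#define _IOC_TYPESHIFT  8
#define _IOC_SIZESHIFT  16
#define _IOC_DIRSHIFT   30

#define _IOC(dir, type, nr, size) \
    (((dir) << _IOC_DIRSHIFT) | ((type) << _IOC_TYPESHIFT) | \
     ((nr) << _IOC_NRSHIFT) | ((size) << _IOC_SIZESHIFT))

So, for example, BINDER_WRITE_READ below encodes direction = read/write, type = 'b', nr = 1, and size = sizeof(struct binder_write_read).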
Binder driver command codes
Now that we understand ioctl and its command codes, let's look at which command codes the binder driver defines and what each of them does. The binder driver command codes are defined in include/uapi/linux/android/binder.h; a few of them appear to be unused, so I won't list those.
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
- BINDER_WRITE_READ: the read/write command used for data transfer; the core of binder IPC
- BINDER_SET_MAX_THREADS: set the maximum number of binder threads
- BINDER_SET_CONTEXT_MGR: become the binder context manager
- BINDER_THREAD_EXIT: a binder thread is exiting; release the related resources
- BINDER_VERSION: get the binder driver version
- BINDER_GET_NODE_DEBUG_INFO: get debug information about a binder node
- BINDER_GET_NODE_INFO_FOR_REF: get binder node information from a binder reference
- BINDER_SET_CONTEXT_MGR_EXT: same as BINDER_SET_CONTEXT_MGR, but carries an extra argument
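As a quick illustration of how user space issues these commands, here is a minimal sketch (not taken from any real source tree; it assumes the UAPI header <linux/android/binder.h> is installed and that the caller is allowed to open /dev/binder) that queries the driver version with BINDER_VERSION:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

int main(void)
{
    struct binder_version vers;
    int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);

    if (fd < 0)
        return 1;
    if (ioctl(fd, BINDER_VERSION, &vers) < 0) {
        close(fd);
        return 1;
    }
    printf("binder protocol version: %d\n", vers.protocol_version);
    close(fd);
    return 0;
}

This mirrors the version check that user-space binder initialization performs when it first opens the driver.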
With these binder driver command codes covered, we can now start the actual analysis of binder_ioctl.
binder_ioctl
This function lives in the file drivers/android/binder.c.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
void __user *ubuf = (void __user *)arg;
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
binder_selftest_alloc(&proc->alloc);
trace_binder_ioctl(cmd, arg);
// wait here (sleeps only when binder has stopped due to a user error; see the wait_event_interruptible section below)
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
// look up the binder_thread matching the calling thread's pid; create one if none exists
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, arg, thread);
if (ret)
goto err;
break;
case BINDER_SET_MAX_THREADS: {
int max_threads;
if (copy_from_user(&max_threads, ubuf,
sizeof(max_threads))) {
ret = -EINVAL;
goto err;
}
binder_inner_proc_lock(proc);
proc->max_threads = max_threads;
binder_inner_proc_unlock(proc);
break;
}
case BINDER_SET_CONTEXT_MGR_EXT: {
struct flat_binder_object fbo;
if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
ret = -EINVAL;
goto err;
}
ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
if (ret)
goto err;
break;
}
case BINDER_SET_CONTEXT_MGR:
ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;
case BINDER_THREAD_EXIT:
binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
proc->pid, thread->pid);
binder_thread_release(proc, thread);
thread = NULL;
break;
case BINDER_VERSION: {
struct binder_version __user *ver = ubuf;
if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
&ver->protocol_version)) {
ret = -EINVAL;
goto err;
}
break;
}
case BINDER_GET_NODE_INFO_FOR_REF: {
struct binder_node_info_for_ref info;
if (copy_from_user(&info, ubuf, sizeof(info))) {
ret = -EFAULT;
goto err;
}
ret = binder_ioctl_get_node_info_for_ref(proc, &info);
if (ret < 0)
goto err;
if (copy_to_user(ubuf, &info, sizeof(info))) {
ret = -EFAULT;
goto err;
}
break;
}
case BINDER_GET_NODE_DEBUG_INFO: {
struct binder_node_debug_info info;
if (copy_from_user(&info, ubuf, sizeof(info))) {
ret = -EFAULT;
goto err;
}
ret = binder_ioctl_get_node_debug_info(proc, &info);
if (ret < 0)
goto err;
if (copy_to_user(ubuf, &info, sizeof(info))) {
ret = -EFAULT;
goto err;
}
break;
}
case BINDER_FREEZE: {
struct binder_freeze_info info;
struct binder_proc **target_procs = NULL, *target_proc;
int target_procs_count = 0, i = 0;
ret = 0;
if (copy_from_user(&info, ubuf, sizeof(info))) {
ret = -EFAULT;
goto err;
}
mutex_lock(&binder_procs_lock);
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
if (target_proc->pid == info.pid)
target_procs_count++;
}
if (target_procs_count == 0) {
mutex_unlock(&binder_procs_lock);
ret = -EINVAL;
goto err;
}
target_procs = kcalloc(target_procs_count,
sizeof(struct binder_proc *),
GFP_KERNEL);
if (!target_procs) {
mutex_unlock(&binder_procs_lock);
ret = -ENOMEM;
goto err;
}
hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
if (target_proc->pid != info.pid)
continue;
binder_inner_proc_lock(target_proc);
target_proc->tmp_ref++;
binder_inner_proc_unlock(target_proc);
target_procs[i++] = target_proc;
}
mutex_unlock(&binder_procs_lock);
for (i = 0; i < target_procs_count; i++) {
if (ret >= 0)
ret = binder_ioctl_freeze(&info,
target_procs[i]);
binder_proc_dec_tmpref(target_procs[i]);
}
kfree(target_procs);
if (ret < 0)
goto err;
break;
}
case BINDER_GET_FROZEN_INFO: {
struct binder_frozen_status_info info;
if (copy_from_user(&info, ubuf, sizeof(info))) {
ret = -EFAULT;
goto err;
}
ret = binder_ioctl_get_freezer_info(&info);
if (ret < 0)
goto err;
if (copy_to_user(ubuf, &info, sizeof(info))) {
ret = -EFAULT;
goto err;
}
break;
}
case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
uint32_t enable;
if (copy_from_user(&enable, ubuf, sizeof(enable))) {
ret = -EFAULT;
goto err;
}
binder_inner_proc_lock(proc);
proc->oneway_spam_detection_enabled = (bool)enable;
binder_inner_proc_unlock(proc);
break;
}
case BINDER_GET_EXTENDED_ERROR:
ret = binder_ioctl_get_extended_error(thread, ubuf);
if (ret < 0)
goto err;
break;
default:
ret = -EINVAL;
goto err;
}
ret = 0;
err:
if (thread)
thread->looper_need_return = false;
wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret && ret != -EINTR)
pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
trace_binder_ioctl_done(ret);
return ret;
}
__user
__user is a macro (in practice an annotation checked by Sparse) that tells the compiler this pointer must not be dereferenced directly, because it is not meaningful in the current address space. (void __user *)arg means that arg is a user-space address: it cannot simply be copied from or written to directly, and must be accessed with functions such as copy_from_user and copy_to_user.
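As a minimal illustration of the pattern (a hypothetical driver, not binder code), an ioctl handler that copies a 32-bit value in from user space, modifies it, and copies the result back might look like this:

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static long example_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    void __user *ubuf = (void __user *)arg;
    u32 value;

    if (copy_from_user(&value, ubuf, sizeof(value)))
        return -EFAULT;   /* the user pointer was invalid */
    value++;              /* work on the kernel-space copy */
    if (copy_to_user(ubuf, &value, sizeof(value)))
        return -EFAULT;
    return 0;
}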
wait_event_interruptible
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
wait_event_interruptible is a macro that suspends the calling process until the given condition is satisfied. binder_stop_on_user_error is a global variable whose initial value is 0, and binder_user_error_wait is a wait queue. Under normal circumstances the condition binder_stop_on_user_error < 2 holds, so the caller does not go to sleep. But after binder has stopped because of an error, a call to binder_ioctl will put the process to sleep until another process wakes the binder_user_error_wait queue via wake_up_interruptible and the condition binder_stop_on_user_error < 2 is satisfied again; only then does binder_ioctl continue.
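The underlying pattern is easier to see in isolation. Here is a minimal generic sketch (not binder's code) of a waiter and a waker:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wait);
static int my_condition;

static int wait_for_condition(void)
{
    /* Sleeps (interruptibly) until my_condition becomes non-zero.
     * If the condition is already true it returns immediately,
     * which is the normal case for binder_ioctl. */
    return wait_event_interruptible(my_wait, my_condition != 0);
}

static void make_condition_true(void)
{
    my_condition = 1;
    /* Wake every task sleeping on my_wait; each one re-checks the condition. */
    wake_up_interruptible(&my_wait);
}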
The binder_thread structure
We need to pay attention to an important structure, binder_thread, which will appear frequently in the code that follows; it describes a worker thread within a process.
struct binder_thread {
// the process this binder thread belongs to
struct binder_proc *proc;
// red-black tree node (linked into binder_proc->threads)
struct rb_node rb_node;
// list node (in the process's waiting_threads list)
struct list_head waiting_thread_node;
int pid;
int looper; /* only modified by this thread */
bool looper_need_return; /* can be written by other thread */
// binder transaction stack (a linked list; entries point to their previous/next transactions)
struct binder_transaction *transaction_stack;
// todo queue: a list of work items waiting to be handled
struct list_head todo;
// whether binder_thread_read should run to completion immediately after binder_thread_write;
// when false, the thread sleeps in binder_thread_read and BINDER_WORK_TRANSACTION_COMPLETE is deferred
bool process_todo;
struct binder_error return_error;
struct binder_error reply_error;
struct binder_extended_error ee;
// wait queue: when handling a binder transaction depends on another binder transaction,
// the thread sleeps on this queue until the transaction it depends on completes and wakes it up
wait_queue_head_t wait;
struct binder_stats stats;
atomic_t tmp_ref;
bool is_dead;
// the thread's task_struct
struct task_struct *task;
spinlock_t prio_lock;
struct binder_priority prio_next;
enum binder_prio_state prio_state;
};
binder_get_thread
Next, let's see how binder_ioctl obtains a binder_thread.
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
struct binder_thread *thread;
struct binder_thread *new_thread;
binder_inner_proc_lock(proc);
thread = binder_get_thread_ilocked(proc, NULL);
binder_inner_proc_unlock(proc);
if (!thread) {
new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
if (new_thread == NULL)
return NULL;
binder_inner_proc_lock(proc);
thread = binder_get_thread_ilocked(proc, new_thread);
binder_inner_proc_unlock(proc);
if (thread != new_thread)
kfree(new_thread);
}
return thread;
}
A quick read of the code shows that binder_get_thread first tries to obtain a binder_thread from the binder_proc; if none is found it allocates a new one. Both cases go through the binder_get_thread_ilocked function.
static struct binder_thread *binder_get_thread_ilocked(
struct binder_proc *proc, struct binder_thread *new_thread)
{
struct binder_thread *thread = NULL;
struct rb_node *parent = NULL;
struct rb_node **p = &proc->threads.rb_node;
while (*p) {
parent = *p;
thread = rb_entry(parent, struct binder_thread, rb_node);
if (current->pid < thread->pid)
p = &(*p)->rb_left;
else if (current->pid > thread->pid)
p = &(*p)->rb_right;
else
return thread;
}
if (!new_thread)
return NULL;
thread = new_thread;
binder_stats_created(BINDER_STAT_THREAD);
thread->proc = proc;
thread->pid = current->pid;
get_task_struct(current);
thread->task = current;
atomic_set(&thread->tmp_ref, 0);
init_waitqueue_head(&thread->wait);
INIT_LIST_HEAD(&thread->todo);
rb_link_node(&thread->rb_node, parent, p);
rb_insert_color(&thread->rb_node, &proc->threads);
thread->looper_need_return = true;
thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
thread->return_error.cmd = BR_OK;
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
thread->reply_error.cmd = BR_OK;
spin_lock_init(&thread->prio_lock);
thread->prio_state = BINDER_PRIO_SET;
thread->ee.command = BR_OK;
INIT_LIST_HEAD(&new_thread->waiting_thread_node);
return thread;
}
The function has two halves. The first half searches the binder_proc->threads red-black tree for the binder_thread corresponding to the pid of the thread making the system call; the second half initializes the new_thread that was passed in and inserts it into that red-black tree (binder_proc->threads).
What follows is the switch on the command code cmd, dispatching to the corresponding work. We will only analyze the more important command codes.
BINDER_WRITE_READ
The most important command code in the binder driver is, without question, BINDER_WRITE_READ. This command drives binder read/write interaction.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
void __user *ubuf = (void __user *)arg;
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/
binder_selftest_alloc(&proc->alloc);
trace_binder_ioctl(cmd, arg);
// wait here (sleeps only when binder has stopped due to a user error; see the wait_event_interruptible section above)
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
// look up the binder_thread matching the calling thread's pid; create one if none exists
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
case BINDER_WRITE_READ:
ret = binder_ioctl_write_read(filp, arg, thread);
if (ret)
goto err;
break;
}
After the switch matches the command code, it simply calls the binder_ioctl_write_read function.
static int binder_ioctl_write_read(struct file *filp, unsigned long arg,
struct binder_thread *thread)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
void __user *ubuf = (void __user *)arg;
struct binder_write_read bwr;
// copy the user-space ubuf into the kernel-space bwr
if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d write %lld at %016llx, read %lld at %016llx\n",
proc->pid, thread->pid,
(u64)bwr.write_size, (u64)bwr.write_buffer,
(u64)bwr.read_size, (u64)bwr.read_buffer);
if (bwr.write_size > 0) {
// if the write buffer contains data, perform the binder write operation
ret = binder_thread_write(proc, thread,
bwr.write_buffer,
bwr.write_size,
&bwr.write_consumed);
trace_binder_write_done(ret);
if (ret < 0) {
bwr.read_consumed = 0;
// copy the modified kernel-space bwr back to the user-space ubuf
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
if (bwr.read_size > 0) {
// if the read buffer contains data, perform the binder read operation
ret = binder_thread_read(proc, thread, bwr.read_buffer,
bwr.read_size,
&bwr.read_consumed,
filp->f_flags & O_NONBLOCK);
trace_binder_read_done(ret);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&proc->todo))
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
if (ret < 0) {
// copy the modified kernel-space bwr back to the user-space ubuf
if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
ret = -EFAULT;
goto out;
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
"%d:%d wrote %lld of %lld, read return %lld of %lld\n",
proc->pid, thread->pid,
(u64)bwr.write_consumed, (u64)bwr.write_size,
(u64)bwr.read_consumed, (u64)bwr.read_size);
// copy the modified kernel-space bwr back to the user-space ubuf
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
ret = -EFAULT;
goto out;
}
out:
return ret;
}
The binder_write_read structure
The argument accepted by the BINDER_WRITE_READ command is a binder_write_read structure, so let's look at that first.
struct binder_write_read {
binder_size_t write_size; /* total size of the data to write */
binder_size_t write_consumed; /* size of the write data already consumed */
binder_uintptr_t write_buffer; /* virtual address of the write data buffer */
binder_size_t read_size; /* total size of the read buffer */
binder_size_t read_consumed; /* size of the read data already consumed */
binder_uintptr_t read_buffer; /* virtual address of the read data buffer */
};
The overall structure of binder_ioctl_write_read is fairly simple. It first treats the user-space argument arg as a binder_write_read structure and copies it from user space into the kernel-space bwr. It then checks the write buffer and the read buffer in turn and, if either contains data, performs the corresponding write or read operation. Note that write_consumed and read_consumed are passed into those operations by address, i.e. the operations update them. Whether or not the read/write operations run, and whether they succeed or fail, copy_to_user is called at the end to copy bwr from kernel space back to the user-space ubuf.
At this point some readers may find it odd: wasn't binder supposed to copy data only once? That is still true. The copy_from_user/copy_to_user calls here only copy the binder_write_read structure itself, which merely carries the virtual addresses of the data to be transferred; the real copy of the IPC data happens inside the binder write/read operations.
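To make the calling convention concrete, here is a minimal user-space sketch (an illustration under assumptions, not code from the article: fd is taken to be an already opened and mmap'ed /dev/binder descriptor). It packs a single BC_ENTER_LOOPER command, which carries no payload, into the write buffer and submits it with BINDER_WRITE_READ; read_size is left at 0, so binder_thread_read is not entered:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>

static int enter_looper(int fd)
{
    uint32_t cmd = BC_ENTER_LOOPER;   /* a request code with no payload */
    struct binder_write_read bwr;

    memset(&bwr, 0, sizeof(bwr));
    bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&cmd;
    bwr.write_size = sizeof(cmd);
    /* read_size stays 0: only binder_thread_write will run */

    return ioctl(fd, BINDER_WRITE_READ, &bwr);
}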
binder_thread_write
Let's look at the binder write operation first. From its arguments this function computes the start and end addresses of the data to be written. Because the buffer may contain several commands, each followed by its own data, the function runs a loop. In each iteration it first calls get_user to read one value from user space into kernel space; this value is the binder request code. The pointer is then advanced past these 32 bits so that it points at the data belonging to that request code, the work corresponding to the request code is carried out, and finally the consumed ("written") data size is updated.
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
struct binder_context *context = proc->context;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
// the buffer may contain multiple commands, each with its own data
while (ptr < end && thread->return_error.cmd == BR_OK) {
int ret;
// read the binder request code
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
}
switch (cmd) {
case BC_INCREFS:
case BC_ACQUIRE:
case BC_RELEASE:
case BC_DECREFS: {
uint32_t target;
const char *debug_string;
bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
struct binder_ref_data rdata;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
ret = -1;
if (increment && !target) {
struct binder_node *ctx_mgr_node;
mutex_lock(&context->context_mgr_node_lock);
ctx_mgr_node = context->binder_context_mgr_node;
if (ctx_mgr_node) {
if (ctx_mgr_node->proc == proc) {
binder_user_error("%d:%d context manager tried to acquire desc 0\n",
proc->pid, thread->pid);
mutex_unlock(&context->context_mgr_node_lock);
return -EINVAL;
}
ret = binder_inc_ref_for_node(
proc, ctx_mgr_node,
strong, NULL, &rdata);
}
mutex_unlock(&context->context_mgr_node_lock);
}
if (ret)
ret = binder_update_ref_for_handle(
proc, target, increment, strong,
&rdata);
if (!ret && rdata.desc != target) {
binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
proc->pid, thread->pid,
target, rdata.desc);
}
switch (cmd) {
case BC_INCREFS:
debug_string = "IncRefs";
break;
case BC_ACQUIRE:
debug_string = "Acquire";
break;
case BC_RELEASE:
debug_string = "Release";
break;
case BC_DECREFS:
default:
debug_string = "DecRefs";
break;
}
if (ret) {
binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
proc->pid, thread->pid, debug_string,
strong, target, ret);
break;
}
binder_debug(BINDER_DEBUG_USER_REFS,
"%d:%d %s ref %d desc %d s %d w %d\n",
proc->pid, thread->pid, debug_string,
rdata.debug_id, rdata.desc, rdata.strong,
rdata.weak);
break;
}
case BC_INCREFS_DONE:
case BC_ACQUIRE_DONE: {
binder_uintptr_t node_ptr;
binder_uintptr_t cookie;
struct binder_node *node;
bool free_node;
if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
node = binder_get_node(proc, node_ptr);
if (node == NULL) {
binder_user_error("%d:%d %s u%016llx no match\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" :
"BC_ACQUIRE_DONE",
(u64)node_ptr);
break;
}
if (cookie != node->cookie) {
binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ?
"BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
(u64)node_ptr, node->debug_id,
(u64)cookie, (u64)node->cookie);
binder_put_node(node);
break;
}
binder_node_inner_lock(node);
if (cmd == BC_ACQUIRE_DONE) {
if (node->pending_strong_ref == 0) {
binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
proc->pid, thread->pid,
node->debug_id);
binder_node_inner_unlock(node);
binder_put_node(node);
break;
}
node->pending_strong_ref = 0;
} else {
if (node->pending_weak_ref == 0) {
binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
proc->pid, thread->pid,
node->debug_id);
binder_node_inner_unlock(node);
binder_put_node(node);
break;
}
node->pending_weak_ref = 0;
}
free_node = binder_dec_node_nilocked(node,
cmd == BC_ACQUIRE_DONE, 0);
WARN_ON(free_node);
binder_debug(BINDER_DEBUG_USER_REFS,
"%d:%d %s node %d ls %d lw %d tr %d\n",
proc->pid, thread->pid,
cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
node->debug_id, node->local_strong_refs,
node->local_weak_refs, node->tmp_refs);
binder_node_inner_unlock(node);
binder_put_node(node);
break;
}
case BC_ATTEMPT_ACQUIRE:
pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
return -EINVAL;
case BC_ACQUIRE_RESULT:
pr_err("BC_ACQUIRE_RESULT not supported\n");
return -EINVAL;
case BC_FREE_BUFFER: {
binder_uintptr_t data_ptr;
struct binder_buffer *buffer;
if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
buffer = binder_alloc_prepare_to_free(&proc->alloc,
data_ptr);
if (IS_ERR_OR_NULL(buffer)) {
if (PTR_ERR(buffer) == -EPERM) {
binder_user_error(
"%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
proc->pid, thread->pid,
(u64)data_ptr);
} else {
binder_user_error(
"%d:%d BC_FREE_BUFFER u%016llx no match\n",
proc->pid, thread->pid,
(u64)data_ptr);
}
break;
}
binder_debug(BINDER_DEBUG_FREE_BUFFER,
"%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
proc->pid, thread->pid, (u64)data_ptr,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
binder_free_buf(proc, thread, buffer, false);
break;
}
case BC_TRANSACTION_SG:
case BC_REPLY_SG: {
struct binder_transaction_data_sg tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr.transaction_data,
cmd == BC_REPLY_SG, tr.buffers_size);
break;
}
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr,
cmd == BC_REPLY, 0);
break;
}
case BC_REGISTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
binder_inner_proc_lock(proc);
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
} else if (proc->requested_threads == 0) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
proc->pid, thread->pid);
} else {
proc->requested_threads--;
proc->requested_threads_started++;
}
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
binder_inner_proc_unlock(proc);
break;
case BC_ENTER_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
}
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
case BC_EXIT_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_EXIT_LOOPER\n",
proc->pid, thread->pid);
thread->looper |= BINDER_LOOPER_STATE_EXITED;
break;
case BC_REQUEST_DEATH_NOTIFICATION:
case BC_CLEAR_DEATH_NOTIFICATION: {
uint32_t target;
binder_uintptr_t cookie;
struct binder_ref *ref;
struct binder_ref_death *death = NULL;
if (get_user(target, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
/*
* Allocate memory for death notification
* before taking lock
*/
death = kzalloc(sizeof(*death), GFP_KERNEL);
if (death == NULL) {
WARN_ON(thread->return_error.cmd !=
BR_OK);
thread->return_error.cmd = BR_ERROR;
binder_enqueue_thread_work(
thread,
&thread->return_error.work);
binder_debug(
BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
proc->pid, thread->pid);
break;
}
}
binder_proc_lock(proc);
ref = binder_get_ref_olocked(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
target);
binder_proc_unlock(proc);
kfree(death);
break;
}
binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
"%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
proc->pid, thread->pid,
cmd == BC_REQUEST_DEATH_NOTIFICATION ?
"BC_REQUEST_DEATH_NOTIFICATION" :
"BC_CLEAR_DEATH_NOTIFICATION",
(u64)cookie, ref->data.debug_id,
ref->data.desc, ref->data.strong,
ref->data.weak, ref->node->debug_id);
binder_node_lock(ref->node);
if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
if (ref->death) {
binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
proc->pid, thread->pid);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
kfree(death);
break;
}
binder_stats_created(BINDER_STAT_DEATH);
INIT_LIST_HEAD(&death->work.entry);
death->cookie = cookie;
ref->death = death;
if (ref->node->proc == NULL) {
ref->death->work.type = BINDER_WORK_DEAD_BINDER;
binder_inner_proc_lock(proc);
binder_enqueue_work_ilocked(
&ref->death->work, &proc->todo);
binder_wakeup_proc_ilocked(proc);
binder_inner_proc_unlock(proc);
}
} else {
if (ref->death == NULL) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
proc->pid, thread->pid);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
break;
}
death = ref->death;
if (death->cookie != cookie) {
binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
proc->pid, thread->pid,
(u64)death->cookie,
(u64)cookie);
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
break;
}
ref->death = NULL;
binder_inner_proc_lock(proc);
if (list_empty(&death->work.entry)) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
binder_enqueue_thread_work_ilocked(
thread,
&death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
&proc->todo);
binder_wakeup_proc_ilocked(
proc);
}
} else {
BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
}
binder_inner_proc_unlock(proc);
}
binder_node_unlock(ref->node);
binder_proc_unlock(proc);
} break;
case BC_DEAD_BINDER_DONE: {
struct binder_work *w;
binder_uintptr_t cookie;
struct binder_ref_death *death = NULL;
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(cookie);
binder_inner_proc_lock(proc);
list_for_each_entry(w, &proc->delivered_death,
entry) {
struct binder_ref_death *tmp_death =
container_of(w,
struct binder_ref_death,
work);
if (tmp_death->cookie == cookie) {
death = tmp_death;
break;
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
proc->pid, thread->pid, (u64)cookie);
binder_inner_proc_unlock(proc);
break;
}
binder_dequeue_work_ilocked(&death->work);
if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
if (thread->looper &
(BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED))
binder_enqueue_thread_work_ilocked(
thread, &death->work);
else {
binder_enqueue_work_ilocked(
&death->work,
&proc->todo);
binder_wakeup_proc_ilocked(proc);
}
}
binder_inner_proc_unlock(proc);
} break;
default:
pr_err("%d:%d unknown command %u\n",
proc->pid, thread->pid, cmd);
return -EINVAL;
}
*consumed = ptr - buffer;
}
return 0;
}
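The while loop above consumes one request code (plus its payload, if any) per iteration, so user space can batch several commands into a single write buffer. Below is a hedged sketch of what such a buffer might look like; the struct, the function, and the stale_buffer value are hypothetical, and the real framework builds this byte stream with a Parcel rather than a packed struct:

#include <stdint.h>
#include <linux/android/binder.h>

struct __attribute__((packed)) batched_cmds {
    uint32_t         free_cmd;      /* BC_FREE_BUFFER, takes a pointer-sized payload */
    binder_uintptr_t buffer_to_free;
    uint32_t         enter_cmd;     /* BC_ENTER_LOOPER, takes no payload */
};

static void fill_batch(struct batched_cmds *batch, binder_uintptr_t stale_buffer)
{
    batch->free_cmd = BC_FREE_BUFFER;
    batch->buffer_to_free = stale_buffer;  /* hypothetical: a buffer the driver handed us earlier */
    batch->enter_cmd = BC_ENTER_LOOPER;
    /* bwr.write_buffer would point at *batch and bwr.write_size would be sizeof(*batch);
     * the loop in binder_thread_write then processes both commands in order. */
}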
Among these, the most important and most frequent operations are BC_TRANSACTION and BC_REPLY, so we will only analyze what these two request codes do.
binder_transaction
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
	...
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr,
cmd == BC_REPLY, 0);
break;
}
	...
}
The binder_transaction structure
The binder_transaction structure describes an inter-process communication transaction. It is defined in drivers/android/binder.c.
struct binder_transaction {
int debug_id;
// describes the work item to be processed
struct binder_work work;
// the thread that initiated the transaction
struct binder_thread *from;
// another transaction that this transaction depends on
struct binder_transaction *from_parent;
// the process that handles this transaction
struct binder_proc *to_proc;
// the thread that handles this transaction
struct binder_thread *to_thread;
// the next transaction the target thread needs to handle
struct binder_transaction *to_parent;
// 1: synchronous transaction, the sender must wait for a reply
// 0: asynchronous transaction
unsigned need_reply:1;
// points to the kernel buffer allocated for this transaction
struct binder_buffer *buffer;
unsigned int code;
unsigned int flags;
// priority of the thread that initiated the transaction
struct binder_priority priority;
// while handling the transaction, the driver may change the thread's priority to satisfy the
// source thread and the target Service component; the original priority is saved here beforehand
// so it can be restored once the thread has finished handling the transaction
struct binder_priority saved_priority;
bool set_priority_called;
kuid_t sender_euid;
binder_uintptr_t security_ctx;
spinlock_t lock;
};
The binder_work structure
The binder_work structure describes a work item that needs to be processed. It is defined in drivers/android/binder.c.
struct binder_work {
// node in a doubly linked list; the list holds all pending binder_work items
struct list_head entry;
// type of the work item
enum binder_work_type {
BINDER_WORK_TRANSACTION = 1,
BINDER_WORK_TRANSACTION_COMPLETE,
BINDER_WORK_RETURN_ERROR,
BINDER_WORK_NODE,
BINDER_WORK_DEAD_BINDER,
BINDER_WORK_DEAD_BINDER_AND_CLEAR,
BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
} type;
};
With these necessary structures covered, let's turn our attention back to the binder_transaction function. Its code is quite long, so we will trim it down and go through it piece by piece. At a high level it can be divided into the following parts:
- Obtain the target process/thread information
- Copy the data into the memory mapped by the target process (the actual mapping is established at this point)
- Add the pending work to the todo queue and wake up the target thread
Part 1: obtaining the target process/thread information
Depending on whether this is a reply, the code splits into two cases.
BC_TRANSACTION
Let's look at the BC_TRANSACTION case first.
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
uint32_t return_error = 0;
struct binder_context *context = proc->context;
if (reply) {
...
} else {
if (tr->target.handle) {
struct binder_ref *ref;
binder_proc_lock(proc);
// look up the binder reference
ref = binder_get_ref_olocked(proc, tr->target.handle, true);
// obtain the target process information via the target binder node
target_node = binder_get_node_refs_for_txn(
ref->node, &target_proc,
&return_error);
binder_proc_unlock(proc);
} else { // a handle of 0 means the target is the ServiceManager
mutex_lock(&context->context_mgr_node_lock);
// the ServiceManager is the binder driver's context manager, so its binder node can be taken directly from the context
target_node = context->binder_context_mgr_node;
if (target_node)
// obtain the target process information via the target binder node
target_node = binder_get_node_refs_for_txn(
target_node, &target_proc,
&return_error);
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
if (target_node && target_proc == proc) {
... //error
}
}
...
// perform a security check via LSM
if (security_binder_transaction(proc->tsk,
target_proc->tsk) < 0) {
... //error
}
binder_inner_proc_lock(proc);
// flags does not include TF_ONE_WAY (i.e. a reply is required) and the current thread has a binder transaction stack
if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
struct binder_transaction *tmp;
tmp = thread->transaction_stack;
if (tmp->to_thread != thread) {
... //error
}
// look for a suitable target binder thread
while (tmp) {
struct binder_thread *from;
spin_lock(&tmp->lock);
from = tmp->from;
if (from && from->proc == target_proc) {
atomic_inc(&from->tmp_ref);
target_thread = from;
spin_unlock(&tmp->lock);
break;
}
spin_unlock(&tmp->lock);
tmp = tmp->from_parent;
}
}
binder_inner_proc_unlock(proc);
}
...
}
As you can see, although the full function is long and complex, the logic becomes much clearer once it is trimmed down. binder_transaction_data.target.handle is an int identifying the target binder reference; when it is non-zero, binder_get_ref_olocked is called to look up the corresponding binder_ref.
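For reference, this is roughly how user space would describe such a transaction. The sketch below is hypothetical (the transaction code 0 and the empty payload are placeholders): it targets handle 0, i.e. the ServiceManager, with a synchronous, data-free transaction:

#include <string.h>
#include <linux/android/binder.h>

static void fill_empty_txn(struct binder_transaction_data *tr)
{
    memset(tr, 0, sizeof(*tr));
    tr->target.handle = 0;   /* 0 selects the context manager (ServiceManager) */
    tr->code = 0;            /* placeholder transaction code understood by the target */
    tr->flags = 0;           /* no TF_ONE_WAY: the sender expects a reply */
    tr->data_size = 0;       /* no payload ... */
    tr->offsets_size = 0;    /* ... and therefore no binder objects to translate */
    /* The BC_TRANSACTION command word followed by this struct would be placed in
     * bwr.write_buffer and submitted with BINDER_WRITE_READ, as sketched earlier. */
}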
BC_REPLY
Next, let's look at the BC_REPLY case.
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_transaction *in_reply_to = NULL;
if (reply) {
binder_inner_proc_lock(proc);
// this is the transaction that was originally initiated, i.e. the one we need to reply to
in_reply_to = thread->transaction_stack;
if (in_reply_to == NULL) {
... //error
}
if (in_reply_to->to_thread != thread) {
... //error
}
// point it at the next transaction to handle, i.e. remove this transaction from the stack
thread->transaction_stack = in_reply_to->to_parent;
binder_inner_proc_unlock(proc);
// the target thread is the thread that initiated the transaction being replied to
target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread->transaction_stack != in_reply_to) {
... //error
}
// get the binder_proc from the binder_thread
target_proc = target_thread->proc;
atomic_inc(&target_proc->tmp_ref);
binder_inner_proc_unlock(target_thread->proc);
} else {
...
}
...
}
Part 2: copying the data and establishing the mapping
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
binder_size_t extra_buffers_size)
{
int ret;
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end, *off_start;
binder_size_t off_min;
u8 *sg_bufp, *sg_buf_end;
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
u32 secctx_sz = 0;
...
// allocate the binder transaction for the target process (it will later be added to the target
// process/thread's todo queue, and the target process/thread will handle it)
t = kzalloc(sizeof(*t), GFP_KERNEL);
spin_lock_init(&t->lock);
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
t->debug_id = t_debug_id;
// set the thread that initiated the transaction
if (!reply && !(tr->flags & TF_ONE_WAY))
t->from = thread;
else
t->from = NULL;
t->sender_euid = task_euid(proc->tsk);
// set the process that will handle the transaction
t->to_proc = target_proc;
// set the thread that will handle the transaction
t->to_thread = target_thread;
t->code = tr->code;
t->flags = tr->flags;
// set the priority
if (!(t->flags & TF_ONE_WAY) &&
binder_supported_policy(current->policy)) {
/* Inherit supported policies for synchronous transactions */
t->priority.sched_policy = current->policy;
t->priority.prio = current->normal_prio;
} else {
/* Otherwise, fall back to the default priority */
t->priority = target_proc->default_priority;
}
// security related
if (target_node && target_node->txn_security_ctx) {
...
}
// allocate the buffer and establish the mapping
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
tr->offsets_size, extra_buffers_size,
!reply && (t->flags & TF_ONE_WAY));
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
off_start = (binder_size_t *)(t->buffer->data +
ALIGN(tr->data_size, sizeof(void *)));
offp = off_start;
// this is the actual "one copy"
copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
tr->data.ptr.buffer, tr->data_size);
copy_from_user(offp, (const void __user *)(uintptr_t)
tr->data.ptr.offsets, tr->offsets_size);
// check data alignment
if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
... //error
}
if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
... //error
}
off_end = (void *)off_start + tr->offsets_size;
sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
sg_buf_end = sg_bufp + extra_buffers_size -
ALIGN(secctx_sz, sizeof(u64));
off_min = 0;
// iterate over every binder object
for (; offp < off_end; offp++) {
struct binder_object_header *hdr;
size_t object_size = binder_validate_object(t->buffer, *offp);
if (object_size == 0 || *offp < off_min) {
... //error
}
hdr = (struct binder_object_header *)(t->buffer->data + *offp);
off_min = *offp + object_size;
switch (hdr->type) {
// the binder type needs to be translated: an object that is a local binder object in process A
// is a remote binder object to process B, and vice versa
case BINDER_TYPE_BINDER:
case BINDER_TYPE_WEAK_BINDER: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
ret = binder_translate_binder(fp, t, thread);
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
struct flat_binder_object *fp;
fp = to_flat_binder_object(hdr);
ret = binder_translate_handle(fp, t, thread);
} break;
case BINDER_TYPE_FD: {
...
} break;
case BINDER_TYPE_FDA: {
...
} break;
case BINDER_TYPE_PTR: {
...
} break;
default:
... //error
}
}
// set the work type (for the completion item)
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
// set the work type of the transaction for the target process
t->work.type = BINDER_WORK_TRANSACTION;
...
}