Overview
ServiceManager is the daemon at the heart of Binder IPC. It is itself a Binder service, but instead of using libbinder's multi-threaded model to talk to the Binder driver, it ships its own binder.c that communicates with the driver directly, and it uses a single loop, binder_loop, to read and process transactions. The benefit of this design is that it is simple and efficient.
ServiceManager's own job is comparatively simple: it queries and registers services. In Binder IPC, the more common case is actually communication between a BpBinder and a BBinder, for example between ActivityManagerProxy and ActivityManagerService.
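Most clients reach ServiceManager through libbinder, but the raw-C query path is compact enough to show up front. The following is a sketch based on the svcmgr_lookup() helper that ships in the same binder.c (treat it as illustrative; exact fields differ across Android releases): a lookup boils down to a single binder_call with code SVC_MGR_CHECK_SERVICE.

```c
// Sketch of binder.c's svcmgr_lookup(): query a service handle by name.
// target is the ServiceManager handle (0); SVC_MGR_NAME is the
// "android.os.IServiceManager" interface token checked by svcmgr_handler.
uint32_t svcmgr_lookup(struct binder_state *bs, uint32_t target, const char *name)
{
    uint32_t handle;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);              // strict-mode header
    bio_put_string16_x(&msg, SVC_MGR_NAME);
    bio_put_string16_x(&msg, name);       // service name

    if (binder_call(bs, &msg, &reply, target, SVC_MGR_CHECK_SERVICE))
        return 0;

    handle = bio_get_ref(&reply);         // the handle ServiceManager found
    if (handle)
        binder_acquire(bs, handle);       // take a strong reference on it

    binder_done(bs, &msg, &reply);
    return handle;
}
```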
Flow Diagram

Detailed UML Analysis
The ServiceManager startup process consists mainly of the following stages:

- Open the binder driver: binder_open;
- Register as the context manager of binder services: binder_become_context_manager;
- Enter an infinite loop to handle client requests: binder_loop.
Startup Process
ServiceManager is created by the init process while parsing init.rc. Its executable is /system/bin/servicemanager, its source file is service_manager.c, and its process name is /system/bin/servicemanager.
The entry point for starting ServiceManager is the main() method in service_manager.c:

```c
int main(int argc, char **argv)
{
    struct binder_state *bs;
    // Open the binder driver and request a 128KB memory mapping
    bs = binder_open(128*1024);
    ...
    // Become the context manager
    if (binder_become_context_manager(bs)) {
        return -1;
    }

    selinux_enabled = is_selinux_enabled(); // Is SELinux enabled?
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);

    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            abort(); // Failed to get sehandle
        }
        if (getcon(&service_manager_context) != 0) {
            abort(); // Failed to get the service_manager context
        }
    }
    ...
    // Enter the infinite loop, handling client requests
    binder_loop(bs, svcmgr_handler);
    return 0;
}
```
binder_open

```c
struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs; // binder_state: see Section 2.2.1
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    // Trap into the kernel via a system call and open the Binder device driver
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        goto fail_open; // Failed to open the binder device
    }

    // Query the binder version via the ioctl system call
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        goto fail_open; // User-space and kernel-space binder versions differ
    }

    bs->mapsize = mapsize;
    // Map the memory via the mmap system call; mapsize must be a multiple of the page size
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        goto fail_map; // Failed to map the binder device memory
    }
    return bs;

fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}
```
binder_open() first calls open() on the binder device. The open() call traps into the Binder driver via a system call, which then runs the driver-side binder_open(). That method creates a binder_proc object in the driver, stores it in filp->private_data, and adds it to the global list binder_procs. ioctl() is then used to verify that the user-space binder version matches the driver's version.
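For reference, the driver-side binder_open() looks roughly like this (a sketch based on a 3.x-era binder driver; details vary by kernel version):

```c
static int binder_open(struct inode *nodp, struct file *filp)
{
    struct binder_proc *proc;

    // Create the binder_proc object for the calling process
    proc = kzalloc(sizeof(*proc), GFP_KERNEL);
    if (proc == NULL)
        return -ENOMEM;
    get_task_struct(current);
    proc->tsk = current;
    INIT_LIST_HEAD(&proc->todo);            // queue of pending work items
    init_waitqueue_head(&proc->wait);
    proc->default_priority = task_nice(current);

    binder_lock(__func__);

    binder_stats_created(BINDER_STAT_PROC);
    // Put the new binder_proc on the global binder_procs list
    hlist_add_head(&proc->proc_node, &binder_procs);
    proc->pid = current->group_leader->pid;
    INIT_LIST_HEAD(&proc->delivered_death);
    // Save the binder_proc so later ioctl/mmap calls can find it
    filp->private_data = proc;

    binder_unlock(__func__);
    ...
    return 0;
}
```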
mmap() is then called to set up the memory mapping. Likewise, mmap() traps into the kernel via a system call and corresponds to binder_mmap() in the Binder driver. That method creates a binder_buffer object in the driver and adds it to the proc->buffers list of the current binder_proc.
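Abridged, the driver-side binder_mmap() is roughly the following (again a sketch of a 3.x-era driver; error handling and the physical-page allocation are elided):

```c
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct vm_struct *area;
    struct binder_proc *proc = filp->private_data;
    struct binder_buffer *buffer;

    // Cap the mapping at 4MB (ServiceManager only asks for 128KB)
    if ((vma->vm_end - vma->vm_start) > SZ_4M)
        vma->vm_end = vma->vm_start + SZ_4M;
    ...
    // Reserve a matching range of kernel address space
    area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
    proc->buffer = area->addr;
    proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
    proc->buffer_size = vma->vm_end - vma->vm_start;
    ...
    // Initially the whole mapping forms one free binder_buffer
    buffer = proc->buffer;
    INIT_LIST_HEAD(&proc->buffers);
    list_add(&buffer->entry, &proc->buffers);
    buffer->free = 1;
    binder_insert_free_buffer(proc, buffer);
    proc->free_async_space = proc->buffer_size / 2;
    ...
    return 0;
}
```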
binder_become_context_manager

```c
int binder_become_context_manager(struct binder_state *bs)
{
    // Send the BINDER_SET_CONTEXT_MGR command via ioctl
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}
```
```c
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    ...
    binder_lock(__func__);
    switch (cmd) {
    case BINDER_SET_CONTEXT_MGR:
        ret = binder_ioctl_set_ctx_mgr(filp); // see Section 2.3.2
        break;
    ...  // other commands omitted
    }
    binder_unlock(__func__);
    ...
}
```
Since the command is BINDER_SET_CONTEXT_MGR, this ultimately calls binder_ioctl_set_ctx_mgr(); the whole path runs while holding binder_main_lock.
```c
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
    int ret = 0;
    struct binder_proc *proc = filp->private_data;
    kuid_t curr_euid = current_euid();

    // Make sure the mgr_node object is only created once
    if (binder_context_mgr_node != NULL) {
        ret = -EBUSY;
        goto out;
    }

    if (uid_valid(binder_context_mgr_uid)) {
        ...
    } else {
        // Record the current thread's euid as the ServiceManager uid
        binder_context_mgr_uid = curr_euid;
    }

    // Create the ServiceManager binder entity (see Section 2.3.3)
    binder_context_mgr_node = binder_new_node(proc, 0, 0);
    ...
    binder_context_mgr_node->local_weak_refs++;
    binder_context_mgr_node->local_strong_refs++;
    binder_context_mgr_node->has_strong_ref = 1;
    binder_context_mgr_node->has_weak_ref = 1;
out:
    return ret;
}
```
This creates the global binder_node object binder_context_mgr_node and increments its strong and weak reference counts by one each.
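The binder_new_node() referenced above (Section 2.3.3) roughly does the following in the driver (a sketch based on a 3.x-era driver); for the ServiceManager node both ptr and cookie are 0:

```c
static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr,
                                           binder_uintptr_t cookie)
{
    struct rb_node **p = &proc->nodes.rb_node;
    struct rb_node *parent = NULL;
    struct binder_node *node;

    // Find the insertion point in the process's red-black tree of nodes,
    // keyed by ptr; bail out if a node for this ptr already exists
    while (*p) {
        parent = *p;
        node = rb_entry(parent, struct binder_node, rb_node);
        if (ptr < node->ptr)
            p = &(*p)->rb_left;
        else if (ptr > node->ptr)
            p = &(*p)->rb_right;
        else
            return NULL;
    }

    node = kzalloc(sizeof(*node), GFP_KERNEL);
    if (node == NULL)
        return NULL;
    binder_stats_created(BINDER_STAT_NODE);
    rb_link_node(&node->rb_node, parent, p);
    rb_insert_color(&node->rb_node, &proc->nodes);
    node->debug_id = ++binder_last_id;
    node->proc = proc;
    node->ptr = ptr;          // 0 for the ServiceManager node
    node->cookie = cookie;    // 0 for the ServiceManager node
    node->work.type = BINDER_WORK_NODE;
    INIT_LIST_HEAD(&node->work.entry);
    INIT_LIST_HEAD(&node->async_todo);
    ...
    return node;
}
```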
binder_loop

```c
void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    readbuf[0] = BC_ENTER_LOOPER;
    // Send the BC_ENTER_LOOPER command to the binder driver so that
    // ServiceManager enters the loop
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // Loop forever, reading and writing binder commands
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0) {
            break;
        }

        // Parse the binder data
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            break;
        }
        if (res < 0) {
            break;
        }
    }
}
```
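binder_write(), used above to send BC_ENTER_LOOPER, is just a write-only BINDER_WRITE_READ ioctl; from the same binder.c (lightly abridged):

```c
int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    // Fill in only the write half; read_size = 0 means the ioctl
    // returns as soon as the commands have been consumed
    bwr.write_size = len;
    bwr.write_consumed = 0;
    bwr.write_buffer = (uintptr_t) data;
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    }
    return res;
}
```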
binder_parse

```c
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
        switch(cmd) {
        case BR_NOOP: // No-op; move on to the next command
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            ...
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                bio_init(&reply, rdata, sizeof(rdata), 4);
                bio_init_from_txn(&msg, txn); // Build a binder_io from txn
                res = func(bs, txn, &msg, &reply);
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            ...
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            // A binder died; invoke its death-notification callback
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            return -1;
        }
    }
    return r;
}
```
binder_parse() decodes the data returned by the driver. Here ptr points at the read buffer (which initially held BC_ENTER_LOOPER), and func points to svcmgr_handler, so whenever a request arrives, svcmgr_handler is invoked.
```c
int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;
    ...
    strict_policy = bio_get_uint32(msg);
    s = bio_get_string16(msg, &len);
    ...

    switch(txn->code) {
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        s = bio_get_string16(msg, &len); // Service name
        // Look up the service by name
        handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
        bio_put_ref(reply, handle);
        return 0;

    case SVC_MGR_ADD_SERVICE:
        s = bio_get_string16(msg, &len); // Service name
        handle = bio_get_ref(msg);       // handle
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        // Register the given service
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
            allow_isolated, txn->sender_pid))
            return -1;
        break;

    case SVC_MGR_LIST_SERVICES: {
        uint32_t n = bio_get_uint32(msg);

        if (!svc_can_list(txn->sender_pid)) {
            return -1;
        }
        si = svclist;
        while ((n-- > 0) && si)
            si = si->next;
        if (si) {
            bio_put_string16(reply, si->name);
            return 0;
        }
        return -1;
    }
    default:
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
```
This method handles three kinds of work: querying a service, registering a service, and listing all services.
Core Work
servicemanager's core work is registering services and querying services.
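Both operations work on the singly linked list svclist. For reference, the record it links together is declared in service_manager.c as follows (copied from the same era of the source; it may differ slightly across releases):

```c
struct svcinfo
{
    struct svcinfo *next;       // next entry in svclist
    uint32_t handle;            // binder handle of the service
    struct binder_death death;  // death-notification callback plus cookie
    int allow_isolated;         // may isolated processes access it?
    size_t len;                 // length of the service name
    uint16_t name[0];           // UTF-16 service name, stored inline
};

struct svcinfo *svclist = NULL; // all registered services
```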
do_find_service

```c
uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    // Look up the service by name
    struct svcinfo *si = find_svc(s, len);

    if (!si || !si->handle) {
        return 0;
    }

    if (!si->allow_isolated) {
        // This service disallows isolated callers; reject the request
        // if the calling uid belongs to an isolated process
        uid_t appid = uid % AID_USER;
        if (appid >= AID_ISOLATED_START && appid <= AID_ISOLATED_END) {
            return 0;
        }
    }

    // Check whether the caller has permission to find this service
    if (!svc_can_find(s, len, spid)) {
        return 0;
    }
    return si->handle;
}
```
do_add_service

```c
int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;

    if (!handle || (len == 0) || (len > 127))
        return -1;

    // Permission check
    if (!svc_can_register(s, len, spid)) {
        return -1;
    }

    // Look for an existing service with this name
    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            // The service is already registered; release the old one first
            svcinfo_death(bs, si);
        }
        si->handle = handle;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        if (!si) {
            // Out of memory; cannot allocate the record
            return -1;
        }
        si->handle = handle;
        si->len = len;
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t)); // Copy the service name
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        si->next = svclist; // svclist holds all registered services
        svclist = si;
    }

    // Send a BC_ACQUIRE command targeting handle to the binder driver via ioctl
    binder_acquire(bs, handle);
    // Send a BC_REQUEST_DEATH_NOTIFICATION command to the binder driver via
    // ioctl, mainly so the entry can be cleaned up when the service dies
    binder_link_to_death(bs, handle, &si->death);
    return 0;
}
```
Registering a service involves three parts:
- svc_can_register: permission check, verifying that the caller's SELinux permissions allow registration;
- find_svc: service lookup, searching for an already-registered service with the matching name (see the sketch after this list);
- svcinfo_death: service release; if a service with the same name is already registered, its old record is cleaned up first, and then the current service is added to the service list svclist.
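find_svc() is a plain walk over svclist, comparing UTF-16 names; from service_manager.c:

```c
struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    // Linear scan of the registered-service list, matching on
    // name length plus an exact UTF-16 comparison
    for (si = svclist; si; si = si->next) {
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}
```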
Summary
ServiceManager centrally manages all services in the system. It uses permission checks to control which processes may register services, and it looks services up by their string names. Because ServiceManager registers a death notification for every service registered with it, only ServiceManager needs to be told when a service's hosting process dies. Every client can then query ServiceManager for the state of a server process, which avoids the heavy load that would result from each client probing server processes directly.
The ServiceManager startup flow:
- Open the binder driver and call mmap() to allocate a 128KB memory-mapped region: binder_open();
- Notify the binder driver to make this process the context manager (daemon): binder_become_context_manager();
- Verify SELinux permissions, deciding whether a process may register or look up a given service;
- Enter a loop waiting for client requests: binder_loop().
Registration is keyed by service name; if a service with the same name has already been registered, the previous registration is removed before the new one is added.
Death notification: when the process hosting a binder dies, binder_release() is called, which in turn calls binder_node_release(); this is the path that delivers the death-notification callbacks.
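On ServiceManager's side, the callback that binder_parse() runs for BR_DEAD_BINDER is svcinfo_death(), registered by do_add_service() above; roughly (from service_manager.c, lightly abridged):

```c
void svcinfo_death(struct binder_state *bs, void *ptr)
{
    struct svcinfo *si = (struct svcinfo *) ptr;

    ALOGI("service '%s' died\n", str8(si->name, si->len));
    if (si->handle) {
        binder_release(bs, si->handle); // drop the stale reference
        si->handle = 0;                 // keep the entry, clear the handle
    }
}
```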
ServiceManager's two core functions are querying and registering services:
- Registering a service: record the service name and handle, saving them in the svclist list;
- Querying a service: look up the corresponding handle by service name.
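As a companion to the svcmgr_lookup() sketch in the overview, the raw-C registration side is the svcmgr_publish() helper in binder.c. A sketch follows; note the allow_isolated field is written here so the message matches the bio_get_uint32() read in svcmgr_handler, though whether the helper includes it varies by release:

```c
int svcmgr_publish(struct binder_state *bs, uint32_t target, const char *name, void *ptr)
{
    int status;
    unsigned iodata[512/4];
    struct binder_io msg, reply;

    bio_init(&msg, iodata, sizeof(iodata), 4);
    bio_put_uint32(&msg, 0);              // strict-mode header
    bio_put_string16_x(&msg, SVC_MGR_NAME);
    bio_put_string16_x(&msg, name);       // service name
    bio_put_obj(&msg, ptr);               // flat_binder_object for the service
    bio_put_uint32(&msg, 0);              // allow_isolated = 0 (see note above)

    if (binder_call(bs, &msg, &reply, target, SVC_MGR_ADD_SERVICE))
        return -1;

    status = bio_get_uint32(&reply);
    binder_done(bs, &msg, &reply);
    return status;
}
```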