
Starting the ServiceManager Process


Related source files:

/system/core/rootdir/init.rc
/frameworks/native/cmds/servicemanager/service_manager.c
/frameworks/native/cmds/servicemanager/binder.c

The ServiceManager process is created by the init process when it parses the init.rc file:

service servicemanager /system/bin/servicemanager
    class core
    user system
    group system
    critical
    onrestart restart healthd
    onrestart restart zygote
    onrestart restart media
    onrestart restart surfaceflinger
    onrestart restart drm

The critical option tells init to reboot the device if this service crashes repeatedly, and each onrestart line restarts a dependent service (healthd, zygote, media, surfaceflinger, drm) whenever servicemanager itself restarts. The corresponding entry point is the main function in /frameworks/native/cmds/servicemanager/service_manager.c:

int main(int argc, char **argv)
{
    struct binder_state *bs;

    // Open the binder driver and request a 128 KB memory mapping
    bs = binder_open(128*1024);
    ...
    // Become the context manager
    if (binder_become_context_manager(bs)) {
        return -1;
    }

    // Check whether SELinux is enabled
    selinux_enabled = is_selinux_enabled();
    // selinux_android_service_context_handle returns the sehandle pointer,
    // i.e. the object through which SELinux is operated
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);
    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            // Failed to acquire sehandle
            abort();
        }
        if (getcon(&service_manager_context) != 0) {
            // Failed to acquire the service_manager context
            abort();
        }
    }
    ...
    // Enter an infinite loop and handle requests from clients
    binder_loop(bs, svcmgr_handler);

    return 0;
}

These are the three stages of the ServiceManager startup:

1. Open the binder driver: binder_open;
2. Register as the context manager for all binder services: binder_become_context_manager;
3. Enter an infinite loop and handle requests from clients: binder_loop.

1. Open the binder driver
struct binder_state *binder_open(size_t mapsize)
{
    struct binder_state *bs;
    struct binder_version vers;

    bs = malloc(sizeof(*bs));
    if (!bs) {
        errno = ENOMEM;
        return NULL;
    }

    // Open the binder driver
    bs->fd = open("/dev/binder", O_RDWR);
    if (bs->fd < 0) {
        fprintf(stderr, "binder: cannot open device (%s)\n", strerror(errno));
        goto fail_open;
    }

    // Query the driver's version and check that it matches user space
    if ((ioctl(bs->fd, BINDER_VERSION, &vers) == -1) ||
        (vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)) {
        fprintf(stderr, "binder: kernel driver version (%d) differs from user space version (%d)\n",
                vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
        goto fail_open;
    }

    // 128 KB memory region
    bs->mapsize = mapsize;
    // Memory-map the driver (handled by binder_mmap in the kernel)
    bs->mapped = mmap(NULL, mapsize, PROT_READ, MAP_PRIVATE, bs->fd, 0);
    if (bs->mapped == MAP_FAILED) {
        fprintf(stderr, "binder: cannot map device (%s)\n", strerror(errno));
        goto fail_map;
    }

    return bs;

// Release resources on failure
fail_map:
    close(bs->fd);
fail_open:
    free(bs);
    return NULL;
}

struct binder_state
{
    // File descriptor for /dev/binder
    int fd;
    // Address returned by mmap
    void *mapped;
    // Size of the mapping, 128 KB by default
    size_t mapsize;
};
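To make the open-and-handshake step concrete, here is a minimal stand-alone sketch that performs the same BINDER_VERSION check binder_open does. It assumes a Linux/Android system that exposes /dev/binder and the binder UAPI header (linux/android/binder.h on recent kernels):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   // BINDER_VERSION, struct binder_version

int main(void)
{
    struct binder_version vers;

    int fd = open("/dev/binder", O_RDWR);
    if (fd < 0) {
        fprintf(stderr, "cannot open /dev/binder (%s)\n", strerror(errno));
        return 1;
    }

    // The same handshake binder_open performs: ask the driver for its protocol version
    if (ioctl(fd, BINDER_VERSION, &vers) == -1) {
        fprintf(stderr, "BINDER_VERSION ioctl failed (%s)\n", strerror(errno));
        close(fd);
        return 1;
    }

    printf("kernel binder protocol version: %d (user space expects %d)\n",
           vers.protocol_version, BINDER_CURRENT_PROTOCOL_VERSION);
    close(fd);
    return 0;
}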

2. Register as the context manager for all binder services

int binder_become_context_manager(struct binder_state *bs)
{
    return ioctl(bs->fd, BINDER_SET_CONTEXT_MGR, 0);
}

This ioctl actually invokes the binder_ioctl function in the driver layer; we will analyze its implementation in a later article.
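The registration is nothing more than that single ioctl on the descriptor returned by binder_open, and only one process system-wide can hold the role. A small sketch of how a caller might wrap it (the driver rejects a second registration, typically with EBUSY, though the exact errno is a kernel detail):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <linux/android/binder.h>   // BINDER_SET_CONTEXT_MGR

// Try to claim the context-manager role on an already-opened binder fd.
// If servicemanager has already claimed it, the driver refuses.
static int become_context_manager(int binder_fd)
{
    if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, 0) < 0) {
        fprintf(stderr, "cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }
    return 0;
}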

3. Enter an infinite loop and handle requests from clients

void binder_loop(struct binder_state *bs, binder_handler func)
{
    int res;
    struct binder_write_read bwr;
    uint32_t readbuf[32];

    bwr.write_size = 0;
    bwr.write_consumed = 0;
    bwr.write_buffer = 0;

    // Write BC_ENTER_LOOPER into the driver, telling it that the current
    // thread has entered the loop; binder_write ultimately calls
    // ioctl(bs->fd, BINDER_WRITE_READ, &bwr)
    readbuf[0] = BC_ENTER_LOOPER;
    binder_write(bs, readbuf, sizeof(uint32_t));

    for (;;) {
        bwr.read_size = sizeof(readbuf);
        bwr.read_consumed = 0;
        bwr.read_buffer = (uintptr_t) readbuf;

        // Loop forever, waiting to read data from the binder driver
        res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
        if (res < 0) {
            ALOGE("binder_loop: ioctl failed (%s)\n", strerror(errno));
            break;
        }

        // Parse the binder data sent by the remote process
        res = binder_parse(bs, 0, (uintptr_t) readbuf, bwr.read_consumed, func);
        if (res == 0) {
            ALOGE("binder_loop: unexpected reply?!\n");
            break;
        }
        if (res < 0) {
            ALOGE("binder_loop: io error %d %s\n", res, strerror(errno));
            break;
        }
    }
}

======================================
BINDER_WRITE_READ is the most important ioctl: it uses the binder_write_read structure to describe the data to be written and read. The key data structures and helpers:
struct binder_write_read {
    signed long write_size;
    signed long write_consumed;
    unsigned long write_buffer;
    signed long read_size;
    signed long read_consumed;
    unsigned long read_buffer;
};

int binder_write(struct binder_state *bs, void *data, size_t len)
{
    struct binder_write_read bwr;
    int res;

    // Size of the data to write: len bytes
    bwr.write_size = len;
    bwr.write_consumed = 0;
    // Buffer holding the BC_ENTER_LOOPER command
    bwr.write_buffer = (uintptr_t) data;
    // read_size = 0 means no data will be read
    bwr.read_size = 0;
    bwr.read_consumed = 0;
    bwr.read_buffer = 0;

    // Hand the binder_write_read structure to the binder driver
    res = ioctl(bs->fd, BINDER_WRITE_READ, &bwr);
    if (res < 0) {
        fprintf(stderr, "binder_write: ioctl failed (%s)\n", strerror(errno));
    }
    return res;
}

// ptr is the address of the data to read, i.e. bwr.read_buffer
int binder_parse(struct binder_state *bs, struct binder_io *bio,
                 uintptr_t ptr, size_t size, binder_handler func)
{
    int r = 1;
    uintptr_t end = ptr + (uintptr_t) size;

    while (ptr < end) {
        uint32_t cmd = *(uint32_t *) ptr;
        ptr += sizeof(uint32_t);
        switch(cmd) {
        // No-op; nothing to do for this command
        case BR_NOOP:
            break;
        case BR_TRANSACTION_COMPLETE:
            break;
        case BR_INCREFS:
        case BR_ACQUIRE:
        case BR_RELEASE:
        case BR_DECREFS:
            ptr += sizeof(struct binder_ptr_cookie);
            break;
        case BR_TRANSACTION: {
            // txn is a binder_transaction_data:
            //   target.handle = 0
            //   data.size
            //   data.ptr.buffer
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            ...
            binder_dump_txn(txn);
            if (func) {
                unsigned rdata[256/4];
                struct binder_io msg;
                struct binder_io reply;
                int res;

                // Prepare the reply buffer
                bio_init(&reply, rdata, sizeof(rdata), 4);
                // Build a binder_io view of txn; msg corresponds to data.ptr.buffer
                bio_init_from_txn(&msg, txn);
                // Invoke the handler callback svcmgr_handler (see below)
                res = func(bs, txn, &msg, &reply);
                // Send the reply back to the binder driver
                binder_send_reply(bs, &reply, txn->data.ptr.buffer, res);
            }
            ptr += sizeof(*txn);
            break;
        }
        case BR_REPLY: {
            struct binder_transaction_data *txn = (struct binder_transaction_data *) ptr;
            ...
            binder_dump_txn(txn);
            if (bio) {
                bio_init_from_txn(bio, txn);
                bio = 0;
            }
            ptr += sizeof(*txn);
            r = 0;
            break;
        }
        case BR_DEAD_BINDER: {
            struct binder_death *death = (struct binder_death *)(uintptr_t) *(binder_uintptr_t *)ptr;
            ptr += sizeof(binder_uintptr_t);
            // A binder has died; invoke its death notification callback
            death->func(bs, death->ptr);
            break;
        }
        case BR_FAILED_REPLY:
            r = -1;
            break;
        case BR_DEAD_REPLY:
            r = -1;
            break;
        default:
            return -1;
        }
    }
    return r;
}

Back in main() in service_manager.c, the func passed to binder_loop(bs, svcmgr_handler) is svcmgr_handler:

int main(int argc, char **argv)
{
    struct binder_state *bs;

    bs = binder_open(128*1024);
    if (!bs) {
        ALOGE("failed to open binder driver\n");
        return -1;
    }

    if (binder_become_context_manager(bs)) {
        ALOGE("cannot become context manager (%s)\n", strerror(errno));
        return -1;
    }

    selinux_enabled = is_selinux_enabled();
    sehandle = selinux_android_service_context_handle();
    selinux_status_open(true);

    if (selinux_enabled > 0) {
        if (sehandle == NULL) {
            ALOGE("SELinux: Failed to acquire sehandle. Aborting.\n");
            abort();
        }
        if (getcon(&service_manager_context) != 0) {
            ALOGE("SELinux: Failed to acquire service_manager context. Aborting.\n");
            abort();
        }
    }

    union selinux_callback cb;
    cb.func_audit = audit_callback;
    selinux_set_callback(SELINUX_CB_AUDIT, cb);
    cb.func_log = selinux_log_callback;
    selinux_set_callback(SELINUX_CB_LOG, cb);

    binder_loop(bs, svcmgr_handler);

    return 0;
}

The txn argument passed into svcmgr_handler is the binder_transaction_data parsed out in binder_parse, and msg is the binder_io view built from it by bio_init_from_txn.
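For reference, the fields of binder_transaction_data that svcmgr_handler consumes, abridged from the binder UAPI header (the exact layout may vary slightly across kernel versions):

struct binder_transaction_data {
    union {
        __u32            handle;  // target handle (used on the client side)
        binder_uintptr_t ptr;     // target pointer; BINDER_SERVICE_MANAGER (0) here
    } target;
    binder_uintptr_t cookie;
    __u32 code;                   // command, e.g. SVC_MGR_ADD_SERVICE / SVC_MGR_CHECK_SERVICE
    __u32 flags;
    pid_t sender_pid;             // caller identity, checked by do_find_service / do_add_service
    uid_t sender_euid;
    binder_size_t data_size;      // size of the payload in bytes
    binder_size_t offsets_size;   // size of the object-offset array
    union {
        struct {
            binder_uintptr_t buffer;   // the payload; msg is a binder_io view of it
            binder_uintptr_t offsets;  // offsets of flat_binder_object entries
        } ptr;
        __u8 buf[8];
    } data;
};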

int svcmgr_handler(struct binder_state *bs,
                   struct binder_transaction_data *txn,
                   struct binder_io *msg,
                   struct binder_io *reply)
{
    struct svcinfo *si;
    uint16_t *s;
    size_t len;
    uint32_t handle;
    uint32_t strict_policy;
    int allow_isolated;

    //ALOGI("target=%p code=%d pid=%d uid=%d\n",
    //  (void*) txn->target.ptr, txn->code, txn->sender_pid, txn->sender_euid);

    // Is this transaction really addressed to the service manager?
    if (txn->target.ptr != BINDER_SERVICE_MANAGER)
        return -1;

    // PING_TRANSACTION: a simple liveness check
    if (txn->code == PING_TRANSACTION)
        return 0;

    // Dispatch on the command code
    switch(txn->code) {
    // Look up / get a service
    case SVC_MGR_GET_SERVICE:
    case SVC_MGR_CHECK_SERVICE:
        // Name of the service to look up
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        // Look up the handle in the service list
        handle = do_find_service(bs, s, len, txn->sender_euid, txn->sender_pid);
        if (!handle)
            break;
        // Write the handle into the reply
        bio_put_ref(reply, handle);
        return 0;

    // Add a service to the list
    case SVC_MGR_ADD_SERVICE:
        // Name of the service
        s = bio_get_string16(msg, &len);
        if (s == NULL) {
            return -1;
        }
        // Handle of the service
        handle = bio_get_ref(msg);
        allow_isolated = bio_get_uint32(msg) ? 1 : 0;
        // Add the service to the list
        if (do_add_service(bs, s, len, handle, txn->sender_euid,
                           allow_isolated, txn->sender_pid))
            return -1;
        break;

    default:
        ALOGE("unknown code %d\n", txn->code);
        return -1;
    }

    bio_put_uint32(reply, 0);
    return 0;
}
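On the lookup path, bio_put_ref is what packs the found handle into the reply. A sketch of its implementation in the same binder.c (reproduced from memory, so minor details may differ between AOSP versions); the driver later translates the BINDER_TYPE_HANDLE object into a reference that is valid in the client's process:

void bio_put_ref(struct binder_io *bio, uint32_t handle)
{
    struct flat_binder_object *obj;

    if (handle)
        obj = bio_alloc_obj(bio);
    else
        obj = bio_alloc(bio, sizeof(*obj));

    if (!obj)
        return;

    // A remote reference: the driver rewrites this handle for the receiver
    obj->flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj->type = BINDER_TYPE_HANDLE;
    obj->handle = handle;
    obj->cookie = 0;
}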
============================================================
uint32_t bio_get_ref(struct binder_io *bio)
{
    struct flat_binder_object *obj;

    obj = _bio_get_obj(bio);
    if (!obj)
        return 0;

    // The caller passed a BINDER_TYPE_BINDER object, which the driver
    // converted to BINDER_TYPE_HANDLE, so this branch is taken
    if (obj->type == BINDER_TYPE_HANDLE)
        return obj->handle;

    return 0;
}

// Look up a service's handle in the service list
uint32_t do_find_service(struct binder_state *bs, const uint16_t *s, size_t len, uid_t uid, pid_t spid)
{
    // Find the service record by name
    struct svcinfo *si = find_svc(s, len);

    // The service was not found
    if (!si || !si->handle) {
        return 0;
    }
    ...
    // Return the service's handle
    return si->handle;
}

struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;

    for (si = svclist; si; si = si->next) {
        // Return the record when the name matches exactly
        if ((len == si->len) &&
            !memcmp(s16, si->name, len * sizeof(uint16_t))) {
            return si;
        }
    }
    return NULL;
}

int do_add_service(struct binder_state *bs,
                   const uint16_t *s, size_t len,
                   uint32_t handle, uid_t uid, int allow_isolated,
                   pid_t spid)
{
    struct svcinfo *si;

    if (!handle || (len == 0) || (len > 127))
        return -1;

    // Permission check
    if (!svc_can_register(s, len, spid)) {
        return -1;
    }

    // Look for an existing record
    si = find_svc(s, len);
    if (si) {
        if (si->handle) {
            // The service is already registered; release the old one first
            svcinfo_death(bs, si);
        }
        si->handle = handle;
    } else {
        si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
        // Out of memory
        if (!si) {
            return -1;
        }
        // Record the handle
        si->handle = handle;
        si->len = len;
        // Record the name of the service being added
        memcpy(si->name, s, (len + 1) * sizeof(uint16_t));
        si->name[len] = '\0';
        si->death.func = (void*) svcinfo_death;
        si->death.ptr = si;
        si->allow_isolated = allow_isolated;
        // svclist holds all registered services
        si->next = svclist;
        svclist = si;
    }

    // Watch the process hosting this service and release it if the process dies:
    // send a BC_ACQUIRE command targeting handle to the binder driver via ioctl
    binder_acquire(bs, handle);
    // Send a BC_REQUEST_DEATH_NOTIFICATION command to the binder driver via ioctl,
    // mainly for cleanup work such as releasing memory
    binder_link_to_death(bs, handle, &si->death);

    return 0;
}

In essence, ServiceManager manages a linked list of svcinfo records, each carrying a handle through which the corresponding service can be reached:

struct svcinfo
{
    struct svcinfo *next;
    // Handle identifying the service's binder node
    uint32_t handle;
    // Death notification record registered via binder_link_to_death
    struct binder_death death;
    int allow_isolated;
    // Length and UTF-16 characters of the service name
    size_t len;
    uint16_t name[0];
};
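To see the list bookkeeping in isolation, here is a self-contained sketch (plain C, no binder dependencies; the helper add_svc and the trimmed-down svcinfo are illustrative only) that mimics how do_add_service and find_svc maintain svclist:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

// Simplified svcinfo: a singly linked list keyed by a UTF-16 name,
// keeping only the fields ServiceManager actually uses for lookup.
struct svcinfo {
    struct svcinfo *next;
    uint32_t handle;
    size_t len;
    uint16_t name[0];
};

static struct svcinfo *svclist;

// Same matching rule as find_svc: length and bytes must both match
static struct svcinfo *find_svc(const uint16_t *s16, size_t len)
{
    struct svcinfo *si;
    for (si = svclist; si; si = si->next)
        if (len == si->len && !memcmp(s16, si->name, len * sizeof(uint16_t)))
            return si;
    return NULL;
}

// Same allocation pattern as do_add_service: header plus inline name
static void add_svc(const uint16_t *s16, size_t len, uint32_t handle)
{
    struct svcinfo *si = malloc(sizeof(*si) + (len + 1) * sizeof(uint16_t));
    si->handle = handle;
    si->len = len;
    memcpy(si->name, s16, len * sizeof(uint16_t));
    si->name[len] = 0;
    si->next = svclist;   // new services are pushed onto the head of svclist
    svclist = si;
}

int main(void)
{
    // "media.player" as UTF-16, the way real service names are stored
    uint16_t name[] = { 'm','e','d','i','a','.','p','l','a','y','e','r' };
    size_t len = sizeof(name) / sizeof(name[0]);

    add_svc(name, len, 42);
    struct svcinfo *si = find_svc(name, len);
    printf("handle = %u\n", si ? si->handle : 0);   // prints: handle = 42
    return 0;
}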

Finally, recall the addService step in the Media service registration: it ultimately crosses process boundaries through the binder driver and executes ServiceManager's do_add_service.