1. First, a quick look at services in Android. One kind is the system service, which an app obtains through getSystemService(); the system services all run together in one process (system_server).
2. The other kind is the app-defined Service, started with startService(). Such a service runs in the application's own process by default, and only gets a separate process if android:process is declared for it in the manifest (a minimal sketch of both kinds follows).
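A minimal sketch of the two kinds from the application side (ServiceDemo and MyService are hypothetical names used only for illustration; AlarmManager is just one example of a system service, and MyService would still need an entry in AndroidManifest.xml):

import android.app.Activity;
import android.app.AlarmManager;
import android.app.Service;
import android.content.Context;
import android.content.Intent;
import android.os.Bundle;
import android.os.IBinder;

public class ServiceDemo extends Activity {
    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);

        // 1. System service: a handle to code that already runs in the
        //    system_server process, looked up by name.
        AlarmManager alarms = (AlarmManager) getSystemService(Context.ALARM_SERVICE);

        // 2. App-defined Service: started explicitly; it runs in this app's
        //    process unless android:process says otherwise.
        startService(new Intent(this, MyService.class));
    }
}

// Hypothetical app-defined Service used by the startService() call above.
class MyService extends Service {
    @Override
    public IBinder onBind(Intent intent) { return null; }   // started, never bound
}

In the first call the real work still happens inside system_server; the app only holds a manager object wrapping a Binder proxy. In the second call, MyService's onCreate()/onStartCommand() run in the app's own process.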
Zygote is essentially a process incubator: it waits on a socket with a select-style server loop and forks new processes in response to command-like requests. The flowchart below shows the flow.
[Flowchart: startup of ServiceManager, Zygote and SystemServer]
From the flowchart we can see:
After ServiceManager has started, the Zygote and SystemServer processes are brought up.
1. The Zygote service is really a select-based server model (a generic sketch of such a loop follows this list).
2. To run the Java code, an AndroidRuntime is opened once and closed again afterwards.
3. The SystemServer process starts a thread that registers the many system services, launches the phone's HOME (that is, the desktop), and then enters its service loop. (Note: this is a Binder service, and a Binder service starts out with two threads: startThreadPool() spawns one pool thread and joinThreadPool() adds the current thread as well, as the native code further below shows.)
The corresponding code is shown further below.
4. Zygote enters its select loop and begins serving requests.
5. The service loop itself is simple, generic code (take data in, move data out, much like a socket server). Upper layers must write their own code on top of this service prototype; a minimal Java-side sketch is given after the native code.
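Points 1 and 4 describe a select-style server loop. The sketch below is only a generic Java NIO illustration of that loop shape, not Zygote's actual code: the real Zygote listens on a local (Unix-domain) socket and forks a child via native calls, whereas this sketch merely acknowledges each command-like request. The class name, port and reply text are invented for the example.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.util.Iterator;

public class SelectLoopSketch {
    public static void main(String[] args) throws IOException {
        Selector selector = Selector.open();
        ServerSocketChannel server = ServerSocketChannel.open();
        server.bind(new InetSocketAddress(12345));      // arbitrary port for the sketch
        server.configureBlocking(false);
        server.register(selector, SelectionKey.OP_ACCEPT);

        while (true) {
            selector.select();                          // block until something is ready
            Iterator<SelectionKey> it = selector.selectedKeys().iterator();
            while (it.hasNext()) {
                SelectionKey key = it.next();
                it.remove();
                if (key.isAcceptable()) {               // new client: watch it for reads
                    SocketChannel client = server.accept();
                    client.configureBlocking(false);
                    client.register(selector, SelectionKey.OP_READ);
                } else if (key.isReadable()) {          // a command-like request arrived
                    SocketChannel client = (SocketChannel) key.channel();
                    ByteBuffer buf = ByteBuffer.allocate(256);
                    int n = client.read(buf);
                    if (n <= 0) { client.close(); continue; }
                    String request = new String(buf.array(), 0, n).trim();
                    // Zygote would fork a child process for the request here;
                    // the sketch just sends an acknowledgement back.
                    client.write(ByteBuffer.wrap(("spawned: " + request + "\n").getBytes()));
                }
            }
        }
    }
}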
ProcessState::self()->startThreadPool();    // spawn one Binder pool thread
IPCThreadState::self()->joinThreadPool();   // add the current thread to the pool as well
case BR_TRANSACTION:
    {
        binder_transaction_data tr;
        result = mIn.read(&tr, sizeof(tr));
        LOG_ASSERT(result == NO_ERROR,
            "Not enough command data for brTRANSACTION");
        if (result != NO_ERROR) break;

        Parcel buffer;
        buffer.ipcSetDataReference(
            reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer),
            tr.data_size,
            reinterpret_cast<const size_t*>(tr.data.ptr.offsets),
            tr.offsets_size/sizeof(size_t), freeBuffer, this);

        const pid_t origPid = mCallingPid;
        const uid_t origUid = mCallingUid;

        mCallingPid = tr.sender_pid;
        mCallingUid = tr.sender_euid;

        int curPrio = getpriority(PRIO_PROCESS, mMyThreadId);
        if (gDisableBackgroundScheduling) {
            if (curPrio > ANDROID_PRIORITY_NORMAL) {
                // We have inherited a reduced priority from the caller, but do not
                // want to run in that state in this process. The driver set our
                // priority already (though not our scheduling class), so bounce
                // it back to the default before invoking the transaction.
                setpriority(PRIO_PROCESS, mMyThreadId, ANDROID_PRIORITY_NORMAL);
            }
        } else {
            if (curPrio >= ANDROID_PRIORITY_BACKGROUND) {
                // We want to use the inherited priority from the caller.
                // Ensure this thread is in the background scheduling class,
                // since the driver won't modify scheduling classes for us.
                // The scheduling group is reset to default by the caller
                // once this method returns after the transaction is complete.
                androidSetThreadSchedulingGroup(mMyThreadId,
                                                ANDROID_TGROUP_BG_NONINTERACT);
            }
        }

        //LOGI(">>>> TRANSACT from pid %d uid %d\n", mCallingPid, mCallingUid);

        Parcel reply;
        IF_LOG_TRANSACTIONS() {
            TextOutput::Bundle _b(alog);
            alog << "BR_TRANSACTION thr " << (void*)pthread_self()
                << " / obj " << tr.target.ptr << " / code "
                << TypeCode(tr.code) << ": " << indent << buffer
                << dedent << endl
                << "Data addr = "
                << reinterpret_cast<const uint8_t*>(tr.data.ptr.buffer)
                << ", offsets addr="
                << reinterpret_cast<const size_t*>(tr.data.ptr.offsets) << endl;
        }
        if (tr.target.ptr) {
            sp<BBinder> b((BBinder*)tr.cookie);
            const status_t error = b->transact(tr.code, buffer, &reply, 0);
            if (error < NO_ERROR) reply.setError(error);
        } else {
            const status_t error = the_context_object->transact(tr.code, buffer, &reply, 0);
            if (error < NO_ERROR) reply.setError(error);
        }

        //LOGI("<<<< TRANSACT from pid %d restore pid %d uid %d\n",
        //     mCallingPid, origPid, origUid);

        if ((tr.flags & TF_ONE_WAY) == 0) {
            LOG_ONEWAY("Sending reply to %d!", mCallingPid);
            sendReply(reply, 0);
        } else {
            LOG_ONEWAY("NOT sending reply to %d!", mCallingPid);
        }

        mCallingPid = origPid;
        mCallingUid = origUid;

        IF_LOG_TRANSACTIONS() {
            TextOutput::Bundle _b(alog);
            alog << "BC_REPLY thr " << (void*)pthread_self() << " / obj "
                << tr.target.ptr << ": " << indent << reply << dedent << endl;
        }
    }
    break;
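As point 5 says, upper layers build on this prototype: the loop above hands BR_TRANSACTION to BBinder::transact(), and for a service written in Java that call surfaces in the Binder object's onTransact(). Below is a hypothetical minimal Java service (EchoService and its transaction code are invented for illustration):

import android.os.Binder;
import android.os.IBinder;
import android.os.Parcel;
import android.os.RemoteException;

public class EchoService extends Binder {
    private static final int TRANSACTION_ECHO = IBinder.FIRST_CALL_TRANSACTION;

    @Override
    protected boolean onTransact(int code, Parcel data, Parcel reply, int flags)
            throws RemoteException {
        if (code == TRANSACTION_ECHO) {
            String msg = data.readString();          // pull the argument out of the request Parcel
            if (reply != null) {                     // reply may be null when the caller expects no result
                reply.writeString("echo: " + msg);   // write the result that sendReply() will return
            }
            return true;
        }
        return super.onTransact(code, data, reply, flags);
    }
}

Making it visible to clients is then a single call in the hosting process, for example ServiceManager.addService("echo", new EchoService()); this is the same framework-internal ServiceManager.addService() that SystemServer uses in step 3 to register the real system services.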