Binder本身是C/S架構(gòu),就可能存在多個(gè)Client會同時(shí)訪問Server的情況。 在這種情況下,如果Server只有一個(gè)線程處理響應(yīng),就會導(dǎo)致客戶端的請求可能需要排隊(duì)而導(dǎo)致響應(yīng)過慢的現(xiàn)象發(fā)生。解決這個(gè)問題的方法就是引入多線程。【多個(gè)客戶端不同線程去請求,服務(wù)端需要使用多線程機(jī)制,binder線程池,創(chuàng)建多個(gè)線程去回復(fù)多個(gè)客戶端的請求】
Binder機(jī)制的設(shè)計(jì)從最底層–驅(qū)動層,就考慮到了對于多線程的支持。具體內(nèi)容如下:
a. 使用 Binder 的進(jìn)程在啟動之后,通過 BINDER_SET_MAX_THREADS 告知驅(qū)動其支持的最大線程數(shù)量
b. 驅(qū)動會對線程進(jìn)行管理。在 binder_proc 結(jié)構(gòu)中,這些字段記錄了進(jìn)程中線程的信息:max_threads,requested_threads,requested_threads_started,ready_threads
c. binder_thread 結(jié)構(gòu)對應(yīng)了 Binder 進(jìn)程中的線程
d. 驅(qū)動通過 BR_SPAWN_LOOPER 命令告知進(jìn)程需要創(chuàng)建一個(gè)新的線程
e. 進(jìn)程通過 BC_ENTER_LOOPER 命令告知驅(qū)動其主線程已經(jīng)ready
f. 進(jìn)程通過 BC_REGISTER_LOOPER 命令告知驅(qū)動其子線程(非主線程)已經(jīng)ready
g. 進(jìn)程通過 BC_EXIT_LOOPER 命令告知驅(qū)動其線程將要退出
h. 在線程退出之后,通過 BINDER_THREAD_EXIT 告知Binder驅(qū)動。驅(qū)動將對應(yīng)的 binder_thread 對象銷毀
1. 最大的 binder 線程數(shù)量
在每個(gè)進(jìn)程啟動時(shí)候,都會創(chuàng)建 ProcessState 對象,獲得ProcessState對象是單例模式,從而保證每一個(gè)進(jìn)程只有一個(gè)ProcessState對象。因此一個(gè)進(jìn)程只打開binder設(shè)備一次,其中ProcessState的成員變量mDriverFD記錄binder驅(qū)動的fd,用于訪問binder設(shè)備。
?
/frameworks/native/libs/binder/ProcessState.cpp
// 在創(chuàng)建 ProcessState 對象的時(shí)候,會去打開driver
// Returns the per-process ProcessState singleton, creating it lazily
// under gProcessMutex. Constructing the singleton is what opens the
// binder driver (kDefaultDriver), so the device is opened once per process.
sp<ProcessState> ProcessState::self()
{
    Mutex::Autolock _l(gProcessMutex);
    if (gProcess == nullptr) {
        // First caller in this process: build the singleton, which opens
        // the binder driver and records the fd in mDriverFD.
        gProcess = new ProcessState(kDefaultDriver);
    }
    return gProcess;
}
// 打開驅(qū)動設(shè)備
// Opens the binder driver node, verifies the kernel/userspace binder
// protocol versions match, and tells the driver how many pooled binder
// threads this process may spawn (DEFAULT_MAX_BINDER_THREADS == 15).
// Returns the driver fd on success, or -1 on any failure.
static int open_driver(const char *driver)
{
    int fd = open(driver, O_RDWR | O_CLOEXEC);
    if (fd >= 0) {
        int vers = 0;
        status_t result = ioctl(fd, BINDER_VERSION, &vers);
        if (result == -1) {
            ALOGE("Binder ioctl to obtain version failed: %s", strerror(errno));
            close(fd);
            fd = -1;
        } else if (result != 0 || vers != BINDER_CURRENT_PROTOCOL_VERSION) {
            // 'else if' so a failed version ioctl does not fall through
            // into this branch and close(fd) a second time on an
            // already-closed descriptor.
            ALOGE("Binder driver protocol(%d) does not match user space protocol(%d)! ioctl() return value: %d",
                vers, BINDER_CURRENT_PROTOCOL_VERSION, result);
            close(fd);
            fd = -1;
        }
        if (fd >= 0) {
            // Tell the driver the max number of pooled binder threads.
            // #define DEFAULT_MAX_BINDER_THREADS 15
            size_t maxThreads = DEFAULT_MAX_BINDER_THREADS;
            // Guarded by fd >= 0 so we never issue this ioctl on a
            // descriptor that was closed above.
            result = ioctl(fd, BINDER_SET_MAX_THREADS, &maxThreads);
            if (result == -1) {
                ALOGE("Binder ioctl to set max threads failed: %s", strerror(errno));
            }
        }
    } else {
        ALOGW("Opening '%s' failed: %s\n", driver, strerror(errno));
    }
    return fd;
}
與binder 驅(qū)動交互,設(shè)置最大線程數(shù)量
/drivers/staging/android/binder.c
// Userspace entry point for binder ioctls (truncated excerpt).
// For BINDER_SET_MAX_THREADS the userspace value is copied straight into
// proc->max_threads, capping how many pool threads the driver may request.
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
struct binder_proc *proc = filp->private_data;
struct binder_thread *thread;
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
if (ret)
goto err_unlocked;
binder_lock(__func__);
// Look up (or create) the binder_thread for the calling thread.
thread = binder_get_thread(proc);
if (thread == NULL) {
ret = -ENOMEM;
goto err;
}
switch (cmd) {
。。。
case BINDER_SET_MAX_THREADS:
// Save the userspace-supplied limit into proc->max_threads.
if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
ret = -EINVAL;
goto err;
}
break;
設(shè)置 binder_proc 結(jié)構(gòu)體的 max_threads 為 15,結(jié)構(gòu)體如下:
// Per-process binder bookkeeping (simplified excerpt): thread-pool
// accounting lives in the last four fields.
struct binder_proc {
struct hlist_node proc_node;
// Red-black tree of this process's binder_thread entries.
struct rb_root threads;
// pid of the owning process.
int pid;
// Pending work items for the process as a whole.
struct list_head todo;
// Max pooled threads, set via BINDER_SET_MAX_THREADS (default 15).
int max_threads;
// Threads the driver has asked userspace to spawn, not yet registered.
int requested_threads;
// Threads spawned in response to driver requests (BC_REGISTER_LOOPER).
int requested_threads_started;
// Idle looper threads currently waiting for work.
int ready_threads;
};
2. binder 主線程的創(chuàng)建
進(jìn)程調(diào)用下列 startThreadPool 方法,去啟動binder 主線程
ProcessState::self()->startThreadPool();
/frameworks/native/libs/binder/ProcessState.cpp
// Starts this process's binder thread pool exactly once: flips
// mThreadPoolStarted (initially false) under mLock and spawns the main
// pool thread. Any later call is a no-op.
void ProcessState::startThreadPool()
{
    AutoMutex _l(mLock);
    if (mThreadPoolStarted) {
        return; // pool already running — nothing to do
    }
    mThreadPoolStarted = true;
    spawnPooledThread(true); // true => this is the main binder thread
}
走到 spawnPooledThread(true)
void ProcessState::spawnPooledThread(bool isMain)
{
// 如果沒有執(zhí)行:startThreadPool,則 mThreadPoolStarted 為false,不走下列的代碼
// 此時(shí)是為 true 的
if (mThreadPoolStarted) {
// 設(shè)置binder thread 的名字
String8 name = makeBinderThreadName();
ALOGV("Spawning new pooled thread, name=%s\n", name.string());
// 創(chuàng)建一個(gè)線程PoolThread,isMain 為true 表示是主線程
sp<Thread> t = new PoolThread(isMain);
// run 這個(gè)線程
t->run(name.string());
}
}
==========
// Builds the next binder pool-thread name, "Binder:<pid>_<seq in hex>"
// (e.g. "Binder:9320_1" for the main thread). The sequence number comes
// from an atomic post-increment of mThreadPoolSeq.
String8 ProcessState::makeBinderThreadName() {
    const int32_t seq = android_atomic_add(1, &mThreadPoolSeq);
    String8 name;
    name.appendFormat("Binder:%d_%X", getpid(), seq);
    return name;
}
創(chuàng)建一個(gè)線程PoolThread,isMain 為true 表示是主線程
PoolThread 繼承了 Thread:
/system/core/libutils/include/utils/threads.h
28 #include <utils/AndroidThreads.h>
29
30 #ifdef __cplusplus
31 #include <utils/Condition.h>
32 #include <utils/Errors.h>
33 #include <utils/Mutex.h>
34 #include <utils/RWLock.h>
35 #include <utils/Thread.h>
36 #endif
37
38 #endif // _LIBS_UTILS_THREADS_H
/system/core/libutils/Threads.cpp
// Starts the thread (truncated excerpt). A second run() while already
// running fails with INVALID_OPERATION. The object takes a strong
// reference on itself (mHoldSelf) so it stays alive until _threadLoop
// releases it; the native thread is created via createThreadEtc when it
// may call into Java, otherwise via androidCreateRawThreadEtc.
status_t Thread::run(const char* name, int32_t priority, size_t stack)
{
LOG_ALWAYS_FATAL_IF(name == nullptr, "thread name not provided to Thread::run");
Mutex::Autolock _l(mLock);
if (mRunning) {
// thread already started
return INVALID_OPERATION;
}
// reset status and exitPending to their default value, so we can
// try again after an error happened (either below, or in readyToRun())
mStatus = OK;
mExitPending = false;
mThread = thread_id_t(-1);
// hold a strong reference on ourself
mHoldSelf = this;
mRunning = true;
bool res;
if (mCanCallJava) {
res = createThreadEtc(_threadLoop,
this, name, priority, stack, &mThread);
} else {
res = androidCreateRawThreadEtc(_threadLoop,
this, name, priority, stack, &mThread);
}
======
// Native entry point for every Thread (truncated excerpt): drops the
// self-reference taken in run(), consults readyToRun() once before the
// first iteration, then repeatedly calls threadLoop() until it returns
// false or an exit is requested.
int Thread::_threadLoop(void* user)
{
Thread* const self = static_cast<Thread*>(user);
sp<Thread> strong(self->mHoldSelf);
wp<Thread> weak(strong);
self->mHoldSelf.clear();
#if defined(__ANDROID__)
// this is very useful for debugging with gdb
self->mTid = gettid();
#endif
bool first = true;
do {
bool result;
if (first) {
first = false;
self->mStatus = self->readyToRun();
result = (self->mStatus == OK);
if (result && !self->exitPending()) {
// Binder threads (and maybe others) rely on threadLoop
// running at least once after a successful ::readyToRun()
// (unless, of course, the thread has already been asked to exit
// at that point).
// This is because threads are essentially used like this:
// (new ThreadSubclass())->run();
// The caller therefore does not retain a strong reference to
// the thread and the thread would simply disappear after the
// successful ::readyToRun() call instead of entering the
// threadLoop at least once.
result = self->threadLoop();
}
} else {
result = self->threadLoop();
}
執(zhí)行run 方法,循環(huán)回調(diào)?threadLoop 方法
// Worker thread of the binder thread pool. isMain distinguishes the main
// binder thread (started by startThreadPool) from driver-requested pool
// threads spawned on BR_SPAWN_LOOPER.
class PoolThread : public Thread
{
public:
explicit PoolThread(bool isMain)
: mIsMain(isMain)
{
}
protected:
virtual bool threadLoop()
{
// Each thread owns a single IPCThreadState instance; joinThreadPool
// loops internally, so returning false afterwards ends this thread.
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}
const bool mIsMain;
};
調(diào)用 joinThreadPool 方法
/frameworks/native/libs/binder/IPCThreadState.cpp
// Registers the calling thread as a binder looper and processes driver
// commands in a loop (truncated excerpt).
void IPCThreadState::joinThreadPool(bool isMain)
{
LOG_THREADPOOL("**** THREAD %p (PID %d) IS JOINING THE THREAD POOL\n", (void*)pthread_self(), getpid());
// Main threads announce themselves with BC_ENTER_LOOPER;
// driver-spawned pool threads use BC_REGISTER_LOOPER.
mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
status_t result;
do {
processPendingDerefs();
// now get the next command to be processed, waiting if necessary
// talkWithDriver() exchanges buffers with the driver; the returned
// command is then executed.
result = getAndExecuteCommand();
if (result < NO_ERROR && result != TIMED_OUT && result != -ECONNREFUSED && result != -EBADF) {
ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
mProcess->mDriverFD, result);
abort();
}
// A non-main thread that timed out is no longer needed: leave the pool.
if(result == TIMED_OUT && !isMain) {
break;
}
} while (result != -ECONNREFUSED && result != -EBADF);
binder 驅(qū)動對:BC_ENTER_LOOPER 的處理
// Driver-side handling of looper registration commands (truncated
// excerpt): BC_ENTER_LOOPER marks the main looper thread, BC_EXIT_LOOPER
// marks a thread that is leaving the pool.
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
trace_binder_command(cmd);
switch (cmd) {
。。。。。。。。
case BC_ENTER_LOOPER:
// A thread already registered as a normal pool thread
// (BINDER_LOOPER_STATE_REGISTERED) must not also enter as main.
if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
proc->pid, thread->pid);
}
// Record BINDER_LOOPER_STATE_ENTERED (0x02) as a bit flag.
thread->looper |= BINDER_LOOPER_STATE_ENTERED;
break;
// A thread that is exiting records BINDER_LOOPER_STATE_EXITED (0x04).
case BC_EXIT_LOOPER:
binder_debug(BINDER_DEBUG_THREADS,
"%d:%d BC_EXIT_LOOPER\n",
proc->pid, thread->pid);
thread->looper |= BINDER_LOOPER_STATE_EXITED;
break;
3. binder 普通線程的創(chuàng)建
線程池是在service端,用于響應(yīng)處理client端的眾多請求。binder線程池中的線程都是由Binder驅(qū)動來控制創(chuàng)建的。
創(chuàng)建binder 普通線程是由binder 驅(qū)動控制的,驅(qū)動通過 BR_SPAWN_LOOPER 命令告知進(jìn)程需要創(chuàng)建一個(gè)新的線程,然后進(jìn)程通過 BC_REGISTER_LOOPER 命令告知驅(qū)動其子線程(非主線程)已經(jīng)ready
service 端創(chuàng)建線程的2種情況:
- BC_TRANSACTION:client進(jìn)程向binderDriver發(fā)送IPC調(diào)用請求的時(shí)候。
- BC_REPLY:server進(jìn)程收到并處理完binderDriver轉(zhuǎn)發(fā)的IPC調(diào)用請求后,發(fā)送返回值的時(shí)候。
首先客戶端調(diào)用?IPCThreadState::transact:
-
客戶端進(jìn)程
/frameworks/native/libs/binder/IPCThreadState.cpp
// Client-side entry for an IPC call (truncated excerpt): packages the
// call as a BC_TRANSACTION command and waits for the driver's response.
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err;
flags |= TF_ACCEPT_FDS;
// Serialize the payload into mOut as a BC_TRANSACTION command.
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, nullptr);
if (reply) {
// Exchange buffers with the binder driver and wait for the reply.
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
// 與binder 驅(qū)動交互 waitForResponse
// Loops talking to the binder driver until a reply or an error arrives
// (truncated excerpt).
status_t IPCThreadState::waitForResponse(Parcel *reply, status_t *acquireResult)
{
uint32_t cmd;
int32_t err;
while (1) {
if ((err=talkWithDriver()) < NO_ERROR) break;
err = mIn.errorCheck();
if (err < NO_ERROR) break;
if (mIn.dataAvail() == 0) continue;
cmd = (uint32_t)mIn.readInt32();
========================
// Performs the BINDER_WRITE_READ ioctl with the driver (truncated
// excerpt): mOut supplies the command bytes to write, mIn receives the
// return bytes read back in the same syscall.
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
if (mProcess->mDriverFD <= 0) {
return -EBADF;
}
binder_write_read bwr;
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (uintptr_t)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (uintptr_t)mIn.data();
} else {
bwr.read_size = 0;
bwr.read_buffer = 0;
}
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
#if defined(__ANDROID__)
// One syscall both writes commands to and reads returns from the driver.
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
與binder 驅(qū)動交互:BC_TRANSACTION
// Driver-side command processing (truncated excerpt): BC_TRANSACTION and
// BC_REPLY both funnel into binder_transaction().
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
。。。。。
// Only BC_TRANSACTION and BC_REPLY reach binder_transaction().
case BC_TRANSACTION:
case BC_REPLY: {
struct binder_transaction_data tr;
if (copy_from_user(&tr, ptr, sizeof(tr)))
return -EFAULT;
ptr += sizeof(tr);
binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
break;
}
只有cmd 命令是 BC_TRANSACTION 和 BC_REPLY 才會調(diào)用 binder_transaction 函數(shù)
// Queues the transaction work on the target and wakes it (truncated,
// heavily elided excerpt — the stray '}' below reflects the elision).
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply)
{
struct binder_transaction *t;
struct binder_work *tcomplete;
binder_size_t *offp, *off_end;
binder_size_t off_min;
。。。。。。
}
// Mark the work item BINDER_WORK_TRANSACTION and append it to the
// target's work list.
t->work.type = BINDER_WORK_TRANSACTION;
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
// Wake the target process/thread to handle the queued work.
if (target_wait)
wake_up_interruptible(target_wait);
return;
-
service 服務(wù)端處理消息
// Server-side read path (truncated excerpt): drains pending work for the
// thread/process, and at 'done' decides whether to ask userspace to spawn
// a new pool thread via BR_SPAWN_LOOPER.
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed, int non_block)
{
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
// Pending errors are flushed to userspace first, then jump to 'done'.
if (thread->return_error != BR_OK && ptr < end) {
if (thread->return_error2 != BR_OK) {
if (put_user(thread->return_error2, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error2);
if (ptr == end)
goto done;
thread->return_error2 = BR_OK;
}
if (put_user(thread->return_error, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
binder_stat_br(proc, thread, thread->return_error);
thread->return_error = BR_OK;
goto done;
}
。。。。。。
// Main work loop: pull items off the thread's (then the process's) todo list.
while (1) {
uint32_t cmd;
struct binder_transaction_data tr;
struct binder_work *w;
// t starts out NULL; it is set only for transaction work items.
struct binder_transaction *t = NULL;
if (!list_empty(&thread->todo)) {
w = list_first_entry(&thread->todo, struct binder_work,
entry);
} else if (!list_empty(&proc->todo) && wait_for_proc_work) {
w = list_first_entry(&proc->todo, struct binder_work,
entry);
} else {
/* no data added */
if (ptr - buffer == 4 &&
!(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
goto retry;
break;
}
if (end - ptr < sizeof(tr) + 4)
break;
switch (w->type) {
// BINDER_WORK_TRANSACTION sets t non-NULL, so the loop later breaks
// out and falls through to the 'done' label.
case BINDER_WORK_TRANSACTION: {
t = container_of(w, struct binder_transaction, work);
} break;
。。。。。
// A BR_DEAD_BINDER (death notification) also jumps to 'done'.
if (cmd == BR_DEAD_BINDER)
goto done; /* DEAD_BINDER notifications can cause transactions */
} break;
}
// Non-transaction work: keep looping without leaving the while.
if (!t)
continue;
。。。
} else {
t->buffer->transaction = NULL;
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
break;
// Closing brace of the while loop above.
}
done:
*consumed = ptr - buffer;
// Ask userspace to spawn a new pool thread only when all 3 hold:
// 1. no spawn request is in flight and no idle (ready) thread exists;
// 2. started pool threads are still below max_threads (default 15);
// 3. the current thread is already a registered/entered looper thread
//    (BINDER_LOOPER_STATE_REGISTERED or _ENTERED).
if (proc->requested_threads + proc->ready_threads == 0 &&
proc->requested_threads_started < proc->max_threads &&
(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
/*spawn a new thread if we leave this out */) {
// Account for the spawn request now in flight.
proc->requested_threads++;
// Deliver BR_SPAWN_LOOPER to userspace at the front of the buffer.
if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
return -EFAULT;
binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
}
return 0;
}
當(dāng)發(fā)生以下3種情況之一,便會進(jìn)入 done 分支:
- 當(dāng)前線程的return_error發(fā)生error的情況;
- 當(dāng)Binder驅(qū)動向client端發(fā)送死亡通知的情況;
- 當(dāng)類型為BINDER_WORK_TRANSACTION(即收到命令是BC_TRANSACTION或BC_REPLY)的情況;
binder_proc 中線程計(jì)數(shù)字段的含義:
- ready_threads: 表示當(dāng)前線程池中有多少可用的空閑線程。
- requested_threads:請求開啟線程的數(shù)量。
- requested_threads_started:表示當(dāng)前已經(jīng)接受請求開啟的線程數(shù)量。
創(chuàng)建 Binder 普通線程的條件有3個(gè):
1. 當(dāng)前進(jìn)程沒有正在請求創(chuàng)建的線程(requested_threads == 0),也沒有已經(jīng)ready可用的空閑線程(ready_threads == 0)
2. 已啟動的普通線程數(shù)量(requested_threads_started)要小于 max_threads(默認(rèn)為 15);
3. 當(dāng)前線程已經(jīng)通過 BC_ENTER_LOOPER 或 BC_REGISTER_LOOPER 注冊為 looper 線程
拷貝 BR_SPAWN_LOOPER 到用戶空間,執(zhí)行用戶空間的代碼創(chuàng)建普通線程:
/frameworks/native/libs/binder/IPCThreadState.cpp
// Reads one command from the driver and executes it (truncated excerpt).
// Also tracks how many pool threads are currently busy so thread-pool
// starvation can be detected.
status_t IPCThreadState::getAndExecuteCommand()
{
status_t result;
int32_t cmd;
result = talkWithDriver();
if (result >= NO_ERROR) {
size_t IN = mIn.dataAvail();
if (IN < sizeof(int32_t)) return result;
cmd = mIn.readInt32();
IF_LOG_COMMANDS() {
alog << "Processing top-level Command: "
<< getReturnString(cmd) << endl;
}
pthread_mutex_lock(&mProcess->mThreadCountLock);
mProcess->mExecutingThreadsCount++;
// Record when every pool thread became busy (starvation start time).
if (mProcess->mExecutingThreadsCount >= mProcess->mMaxThreads &&
mProcess->mStarvationStartTimeMs == 0) {
mProcess->mStarvationStartTimeMs = uptimeMillis();
}
pthread_mutex_unlock(&mProcess->mThreadCountLock);
result = executeCommand(cmd);
executeCommand
// Dispatches one BR_* return command from the driver (truncated excerpt).
// BR_SPAWN_LOOPER asks this process to add a normal (non-main) thread to
// the binder pool.
status_t IPCThreadState::executeCommand(int32_t cmd)
{
BBinder* obj;
RefBase::weakref_type* refs;
status_t result = NO_ERROR;
switch ((uint32_t)cmd) {
case BR_SPAWN_LOOPER:
// Spawn a non-main pool thread as requested by the driver.
mProcess->spawnPooledThread(false);
break;
執(zhí)行??mProcess->spawnPooledThread(false)
?/frameworks/native/libs/binder/ProcessState.cpp
// Same path as for the main thread, but isMain is now false: a normal
// pool thread is spawned in response to the driver's BR_SPAWN_LOOPER.
void ProcessState::spawnPooledThread(bool isMain)
{
// isMain is false on this path.
if (mThreadPoolStarted) {
// Build the thread's name, e.g. "Binder:9032_2".
String8 name = makeBinderThreadName();
ALOGV("Spawning new pooled thread, name=%s\n", name.string());
// Create the PoolThread and start it; run() leads to threadLoop().
sp<Thread> t = new PoolThread(isMain);
t->run(name.string());
}
}
=======
// Builds "Binder:<pid>_<seq in hex>" for the next pool thread.
String8 ProcessState::makeBinderThreadName() {
// Atomically post-increment the pool sequence number.
int32_t s = android_atomic_add(1, &mThreadPoolSeq);
pid_t pid = getpid();
String8 name;
// Yields e.g. "Binder:9032_2" for the first driver-requested thread.
name.appendFormat("Binder:%d_%X", pid, s);
return name;
}
創(chuàng)建 PoolThread對象,指向run 方法
// PoolThread (quoted again): run() drives threadLoop(), which joins the
// binder thread pool — here with isMain == false for a normal thread.
class PoolThread : public Thread
{
public:
explicit PoolThread(bool isMain)
: mIsMain(isMain)
{
}
protected:
virtual bool threadLoop()
{
// joinThreadPool loops internally; returning false ends threadLoop.
IPCThreadState::self()->joinThreadPool(mIsMain);
return false;
}
const bool mIsMain;
};
又回到:IPCThreadState::self()->joinThreadPool(false),此 IPCThreadState 對象是個(gè)新的對象(線程局部),與主線程的 IPCThreadState 是不同的。
/frameworks/native/libs/binder/IPCThreadState.cpp
// Turns the calling thread into a binder looper thread. A main thread
// (isMain == true) registers with BC_ENTER_LOOPER and stays in the pool
// on timeout; a driver-spawned pool thread registers with
// BC_REGISTER_LOOPER and leaves the pool when the driver times it out.
// On the way out the thread reports BC_EXIT_LOOPER to the driver.
void IPCThreadState::joinThreadPool(bool isMain)
{
    // Queue the registration command for the next talkWithDriver().
    mOut.writeInt32(isMain ? BC_ENTER_LOOPER : BC_REGISTER_LOOPER);
    status_t result;
    while (true) {
        result = getAndExecuteCommand();
        const bool expected = (result >= NO_ERROR) || result == TIMED_OUT ||
                result == -ECONNREFUSED || result == -EBADF;
        if (!expected) {
            ALOGE("getAndExecuteCommand(fd=%d) returned unexpected error %d, aborting",
                mProcess->mDriverFD, result);
            abort();
        }
        // A non-main thread that timed out is no longer needed: leave the pool.
        if (result == TIMED_OUT && !isMain) {
            break;
        }
        // The driver going away ends the loop for every thread.
        if (result == -ECONNREFUSED || result == -EBADF) {
            break;
        }
    }
    LOG_THREADPOOL("**** THREAD %p (PID %d) IS LEAVING THE THREAD POOL err=%d\n",
        (void*)pthread_self(), getpid(), result);
    // Tell the binder driver this looper thread is exiting.
    mOut.writeInt32(BC_EXIT_LOOPER);
    talkWithDriver(false);
}
isMain 為false,與binder 驅(qū)動交互的命令是 BC_REGISTER_LOOPER
// Driver-side handling of BC_REGISTER_LOOPER (truncated excerpt): a
// driver-requested pool thread reports that it is ready.
static int binder_thread_write(struct binder_proc *proc,
struct binder_thread *thread,
binder_uintptr_t binder_buffer, size_t size,
binder_size_t *consumed)
{
uint32_t cmd;
void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
while (ptr < end && thread->return_error == BR_OK) {
if (get_user(cmd, (uint32_t __user *)ptr))
return -EFAULT;
ptr += sizeof(uint32_t);
switch (cmd) {
。。。。。。。
case BC_REGISTER_LOOPER:
// Error: the main looper thread must not also register as normal.
if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
proc->pid, thread->pid);
// Error: the driver never requested a new thread.
} else if (proc->requested_threads == 0) {
thread->looper |= BINDER_LOOPER_STATE_INVALID;
binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
proc->pid, thread->pid);
} else {
// One pending spawn request consumed; one more started thread.
proc->requested_threads--;
proc->requested_threads_started++;
}
// Record the registered-looper mode bit.
thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
break;
Binder機(jī)制中的收發(fā)消息及線程池 - 騰訊云開發(fā)者社區(qū)-騰訊云文章來源:http://www.zghlxwxcb.cn/news/detail-426785.html
進(jìn)程的Binder線程池工作過程-移動端開發(fā)文章來源地址http://www.zghlxwxcb.cn/news/detail-426785.html
到了這里,關(guān)于【安卓源碼】Binder機(jī)制3 -- Binder線程池的文章就介紹完了。如果您還想了解更多內(nèi)容,請?jiān)谟疑辖撬阉鱐OY模板網(wǎng)以前的文章或繼續(xù)瀏覽下面的相關(guān)文章,希望大家以后多多支持TOY模板網(wǎng)!