Thread start(1)

作者: 李发糕 | Published 2021-07-12 10:13

    start

    public synchronized void start() {
        // Android-changed: throw if 'started' is true
        if (threadStatus != 0 || started)
            throw new IllegalThreadStateException();
        // Notify the owning thread group and add this thread to it; the group also
        // decrements its count of unstarted threads.
        group.add(this);
    
        started = false;
        try {
            // Call into native code. This is where Android differs slightly from the
            // OpenJDK implementation: nativeCreate is an Android-specific method.
            nativeCreate(this, stackSize, daemon);
            started = true;
        } finally {
            try {
                if (!started) {
                    // Report the failed start: the group removes this thread and
                    // increments its unstarted-thread count again.
                    group.threadStartFailed(this);
                }
            } catch (Throwable ignore) {
                /* do nothing. If start0 threw a Throwable then
                  it will be passed up the call stack */
            }
        }
    }
    

    Next, let's look at the implementation of nativeCreate.

    nativeCreate

    The registration of this method can be found in java_lang_Thread.cc:

    NATIVE_METHOD(Thread, nativeCreate, "(Ljava/lang/Thread;JZ)V"),
    

    The definition of NATIVE_METHOD:

    #define NATIVE_METHOD(className, functionName, signature) \
        { #functionName, signature, reinterpret_cast<void*>(className ## _ ## functionName) }
    

    So within java_lang_Thread.cc, nativeCreate is bound to the function Thread_nativeCreate.
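    For clarity, substituting className = Thread and functionName = nativeCreate into the macro above yields roughly the following JNINativeMethod entry (the surrounding array name here is only illustrative, not ART's actual one):

    static const JNINativeMethod gThreadMethods[] = {
        // #functionName -> "nativeCreate", className ## _ ## functionName -> Thread_nativeCreate
        { "nativeCreate", "(Ljava/lang/Thread;JZ)V",
          reinterpret_cast<void*>(Thread_nativeCreate) },
    };
    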

    java_lang_Thread::Thread_nativeCreate

    static void Thread_nativeCreate(JNIEnv* env, jclass, jobject java_thread, jlong stack_size,
                                    jboolean daemon) {
      // There are sections in the zygote that forbid thread creation.
      Runtime* runtime = Runtime::Current();
      if (runtime->IsZygote() && runtime->IsZygoteNoThreadSection()) {  // in the zygote, inside a section that forbids thread creation
        ...  // throw an exception
        return;
      }
      Thread::CreateNativeThread(env, java_thread, stack_size, daemon == JNI_TRUE);  // create the native thread, passing in the Java Thread object initialized earlier
    }
    

    Thread::CreateNativeThread

    This method lives in thread.cc.
    We'll go through the code in several pieces.

    void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
      CHECK(java_peer != nullptr);
      Thread* self = static_cast<JNIEnvExt*>(env)->GetSelf();  // the thread backing the current JNI environment, i.e. the calling thread
      ...
      Runtime* runtime = Runtime::Current();
    
      // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
      bool thread_start_during_shutdown = false;
      {
        MutexLock mu(self, *Locks::runtime_shutdown_lock_);
        if (runtime->IsShuttingDownLocked()) {
          thread_start_during_shutdown = true;
        } else {
          runtime->StartThreadBirth();
        }
      }
      if (thread_start_during_shutdown) {
        ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
        env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
        return;
      }
      ...
    }
    

    It first checks whether the runtime is shutting down; if so, it throws an InternalError and returns.
    Otherwise it calls runtime->StartThreadBirth() to notify the runtime, which increments its count of threads currently being born.

    void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
      threads_being_born_++;
    }
    
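    As a mental model only (this is not ART's code), the births/shutdown bookkeeping boils down to a mutex-guarded counter plus a shutdown flag, something like:

    #include <mutex>

    // Simplified model of the runtime's thread-birth bookkeeping. Names are made up;
    // ART uses its own Mutex/MutexLock types and waits instead of failing on shutdown.
    class RuntimeModel {
     public:
      bool TryStartThreadBirth() {                    // parent calls this before spawning
        std::lock_guard<std::mutex> lock(shutdown_lock_);
        if (shutting_down_) return false;             // mirrors IsShuttingDownLocked()
        ++threads_being_born_;                        // mirrors StartThreadBirth()
        return true;
      }
      void EndThreadBirth() {                         // child calls this once it is initialized
        std::lock_guard<std::mutex> lock(shutdown_lock_);
        --threads_being_born_;
      }
      bool TryBeginShutdown() {                       // shutdown must not race with a birth
        std::lock_guard<std::mutex> lock(shutdown_lock_);
        if (threads_being_born_ > 0) return false;    // the real runtime waits on a condition instead
        shutting_down_ = true;
        return true;
      }
     private:
      std::mutex shutdown_lock_;
      int threads_being_born_ = 0;
      bool shutting_down_ = false;
    };
    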

    Continuing with CreateNativeThread:

    {
      ...
      Thread* child_thread = new Thread(is_daemon);  // create the native-level Thread object
      // Use global JNI ref to hold peer live while child thread starts.
      child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);  // promote the Java Thread to a global reference held by the native Thread
      stack_size = FixStackSize(stack_size);  // compute the stack size this thread needs from the requested size; we'll look at the calculation some other time
    
      // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
      // to assign it.
      env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
                        reinterpret_cast<jlong>(child_thread));  // store the native Thread pointer in the Java Thread's nativePeer field; from this point the Java and native thread objects hold references to each other
      ...
    }
    

    Nothing surprising in this part: it creates the native Thread object and wires it up to its Java peer.

    {
      ...
      // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
      // do not have a good way to report this on the child's side.
      std::string error_msg;
      std::unique_ptr<JNIEnvExt> child_jni_env_ext(
          JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg));  // create the JNIEnvExt for the new thread
    
      int pthread_create_result = 0;
      if (child_jni_env_ext.get() != nullptr) {  // JNIEnvExt created successfully
        pthread_t new_pthread;
        pthread_attr_t attr;
        // hand the new JNI environment to the child thread
        child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
        // a few pthread attribute calls
        CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");  // initialize the attributes
        CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),  // mark the thread detached
                           "PTHREAD_CREATE_DETACHED");
        CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);  // set the stack size
        pthread_create_result = pthread_create(&new_pthread,
                                               &attr,
                                               Thread::CreateCallback,
                                               child_thread);  // create the underlying pthread; it runs Thread::CreateCallback when it starts
        CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");  // destroy the attributes
    
        if (pthread_create_result == 0) {  // pthread created successfully
          // pthread_create started the new thread. The child is now responsible for managing the
          // JNIEnvExt we created.
          // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
          //       between the threads.
          child_jni_env_ext.release();  // release ownership; the child thread now manages it
          return;
        }
      }
      ...
    }
    

    This part configures the pthread attributes (detached, explicit stack size) and finally creates the real thread via pthread_create, whose bionic implementation we'll read below. A minimal standalone sketch of the same attribute setup follows.
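    Outside ART, roughly the same attribute dance looks like this (ChildArgs and child_main are made-up names for the sketch; ART passes its native Thread* and Thread::CreateCallback instead):

    #include <pthread.h>
    #include <unistd.h>
    #include <cstdio>

    struct ChildArgs { int id; };

    // The entry point gets back the void* we handed to pthread_create.
    static void* child_main(void* arg) {
      ChildArgs* args = static_cast<ChildArgs*>(arg);
      std::printf("child %d running\n", args->id);
      return nullptr;
    }

    int main() {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      // Same attributes ART sets: detached (nobody will pthread_join it) and an explicit stack size.
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
      pthread_attr_setstacksize(&attr, 1024 * 1024);

      ChildArgs args = {1};
      pthread_t t;
      int rc = pthread_create(&t, &attr, child_main, &args);
      pthread_attr_destroy(&attr);
      if (rc != 0) {
        std::fprintf(stderr, "pthread_create failed: %d\n", rc);
        return 1;
      }
      sleep(1);  // crude: keep the process alive long enough for the detached child to run
      return 0;
    }
    

    The last section of CreateNativeThread: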

    {
      ...
      // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
      {
        MutexLock mu(self, *Locks::runtime_shutdown_lock_);
        runtime->EndThreadBirth();
      }
      // Manually delete the global reference since Thread::Init will not have been run.
      env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
      child_thread->tlsPtr_.jpeer = nullptr;
      delete child_thread;
      child_thread = nullptr;
      // TODO: remove from thread group?
      env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
      {
        std::string msg(child_jni_env_ext.get() == nullptr ?
            StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
            StringPrintf("pthread_create (%s stack) failed: %s",
                                     PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
        ScopedObjectAccess soa(env);
        soa.Self()->ThrowOutOfMemoryError(msg.c_str());
      }
    }
    

    This part is straightforward: when thread creation fails, it releases the resources and cleans up.
    That's the end of CreateNativeThread. Next, let's look at Thread::CreateCallback, the entry point executed by the newly created thread.

    Thread::CreateCallback

    As shown above, pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread) was called with Thread::CreateCallback as the start routine and child_thread, the freshly created native Thread object, as its argument.

    void* Thread::CreateCallback(void* arg) {  // runs when the new thread actually starts
      Thread* self = reinterpret_cast<Thread*>(arg);  // recover the native Thread object from the argument
      Runtime* runtime = Runtime::Current();
      if (runtime == nullptr) {
        LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
        return nullptr;
      }
      {
        // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
        //       after self->Init().
        MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
        // Check that if we got here we cannot be shutting down (as shutdown should never have started
        // while threads are being born).
        CHECK(!runtime->IsShuttingDownLocked());
        // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
        //       a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
        //       the runtime in such a case. In case this ever changes, we need to make sure here to
        //       delete the tmp_jni_env, as we own it at this point.
        CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
        self->tlsPtr_.tmp_jni_env = nullptr;
        Runtime::Current()->EndThreadBirth();
      }
      ...
    }
    

    It first checks the state of the runtime and of this thread's native Thread object.
    Once everything is ready, it drops the temporary JNI environment created in the parent thread and calls EndThreadBirth so the runtime decrements its threads-being-born count.

    {
      ...
      {
        ScopedObjectAccess soa(self);
        self->InitStringEntryPoints();
    
        // Copy peer into self, deleting global reference when done.
        CHECK(self->tlsPtr_.jpeer != nullptr);
        self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
        self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
        self->tlsPtr_.jpeer = nullptr;
        self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());  // the global ref has been dropped above; now copy over a few properties such as the thread name
    
        ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority);
        self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));  // apply the Java priority to the native thread
    
        runtime->GetRuntimeCallbacks()->ThreadStart(self);  // notify the thread lifecycle callbacks registered with the runtime; not important here
    
        // Invoke the 'run' method of our java.lang.Thread.
        ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
        jmethodID mid = WellKnownClasses::java_lang_Thread_run;
        ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
        InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);  // finally run the Java Thread's run() method
      }
      // Detach and delete self.
      Runtime::Current()->GetThreadList()->Unregister(self);
    
      return nullptr;
    }
    

    That wraps up the runtime side of Thread.start. Next, let's look at the implementation of pthread_create and its helpers.

    pthread_create

    This function lives in pthread_create.cpp; we'll read it in pieces.

    pthread_create(1)

    __BIONIC_WEAK_FOR_NATIVE_BRIDGE
    int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
                       void* (*start_routine)(void*), void* arg) {
      ErrnoRestorer errno_restorer;
    
      pthread_attr_t thread_attr;
      ...  // if no attributes were passed in, initialize defaults
      pthread_internal_t* thread = NULL;
      void* child_stack = NULL;
      int result = __allocate_thread(&thread_attr, &thread, &child_stack);  // allocate the thread's memory and fill in part of its state
      if (result != 0) {
        return result;
      }
      ...
    }
    
    __allocate_thread
    static int __allocate_thread(pthread_attr_t* attr, pthread_internal_t** threadp, void** child_stack) {
      size_t mmap_size;
      uint8_t* stack_top;
    
      if (attr->stack_base == NULL) {  // the caller didn't supply a stack for this thread, so allocate one here
        // The caller didn't provide a stack, so allocate one.
        // Make sure the stack size and guard size are multiples of PAGE_SIZE.
        if (__builtin_add_overflow(attr->stack_size, attr->guard_size, &mmap_size)) return EAGAIN;
        if (__builtin_add_overflow(mmap_size, sizeof(pthread_internal_t), &mmap_size)) return EAGAIN;
        // total to allocate: stack_size + guard_size + sizeof(pthread_internal_t),
        // rounded up to the page size
        mmap_size = __BIONIC_ALIGN(mmap_size, PAGE_SIZE);
        attr->guard_size = __BIONIC_ALIGN(attr->guard_size, PAGE_SIZE);  // guard_size is the stack guard region, also page-aligned
        attr->stack_base = __create_thread_mapped_space(mmap_size, attr->guard_size);  // mmap a region to use as the stack
        if (attr->stack_base == NULL) {
          return EAGAIN;
        }
        stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + mmap_size;  // the initial stack top is the very end of this region
      } else {
        // Remember the mmap size is zero and we don't need to free it.
        mmap_size = 0;
        stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + attr->stack_size;  // same as above, for the caller-supplied stack
      }
    
      // Mapped space(or user allocated stack) is used for:
      //   pthread_internal_t
      //   thread stack (including guard)
      // current stack top = (end of the region - sizeof(pthread_internal_t)), aligned down to 16 bytes
      // To safely access the pthread_internal_t and thread stack, we need to find a 16-byte aligned boundary.
      stack_top = reinterpret_cast<uint8_t*>(
                    (reinterpret_cast<uintptr_t>(stack_top) - sizeof(pthread_internal_t)) & ~0xf);
    
      pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);  // the space just reserved at the top end holds the pthread_internal_t struct
      if (mmap_size == 0) {  // this memory was supplied by the caller
        // If thread was not allocated by mmap(), it may not have been cleared to zero.
        // So assume the worst and zero it.
        memset(thread, 0, sizeof(pthread_internal_t));  // zero the space reserved for pthread_internal_t
      }
      attr->stack_size = stack_top - reinterpret_cast<uint8_t*>(attr->stack_base);  // compute the final usable stack size
    
      thread->mmap_size = mmap_size;
      thread->attr = *attr;  // save the attributes
      // initialize this thread's TLS
      if (!__init_tls(thread)) {  // on failure
        if (thread->mmap_size != 0) munmap(thread->attr.stack_base, thread->mmap_size);  // if we mmap'ed the memory ourselves, unmap it
        return EAGAIN;
      }
      __init_thread_stack_guard(thread);  // initialize the thread's stack guard
    
      *threadp = thread;  // hand back the pthread_internal_t
      *child_stack = stack_top;  // hand back the thread's initial stack top
      return 0;
    }
    

    From this code we can see the layout of the space pthread allocates:
    | stack grows this way <----- thread stack ---- initial stack top | pthread_internal_t struct |
    So the thread stack grows downward, from high addresses to low. A toy re-run of the size and alignment arithmetic is shown below.
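    To make the arithmetic concrete, here is a toy re-run of it with made-up numbers (the real sizeof(pthread_internal_t), guard size, and addresses differ per platform):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    // Stand-in values only.
    constexpr uintptr_t kPageSize     = 4096;
    constexpr uintptr_t kStackSize    = 1024 * 1024;  // attr.stack_size
    constexpr uintptr_t kGuardSize    = 4096;         // attr.guard_size
    constexpr uintptr_t kInternalSize = 1024;         // pretend sizeof(pthread_internal_t)

    constexpr uintptr_t AlignUp(uintptr_t x, uintptr_t a) { return (x + a - 1) & ~(a - 1); }

    int main() {
      uintptr_t mmap_size = AlignUp(kStackSize + kGuardSize + kInternalSize, kPageSize);
      uintptr_t base      = 0x70000000;                // pretend this is what mmap() returned
      uintptr_t stack_top = base + mmap_size;          // start at the high end of the mapping
      stack_top = (stack_top - kInternalSize) & ~uintptr_t{0xf};  // carve out pthread_internal_t, 16-byte aligned

      std::printf("mmap_size          = %" PRIuPTR "\n", mmap_size);
      std::printf("guard region       = [%#" PRIxPTR ", %#" PRIxPTR ")\n", base, base + kGuardSize);
      std::printf("usable stack       = [%#" PRIxPTR ", %#" PRIxPTR ")\n", base + kGuardSize, stack_top);
      std::printf("pthread_internal_t = %#" PRIxPTR "\n", stack_top);
      return 0;
    }
    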
    Next, let's look at how this space is mapped with mmap.

    __create_thread_mapped_space
    static void* __create_thread_mapped_space(size_t mmap_size, size_t stack_guard_size) {
      // Create a new private anonymous map.
      int prot = PROT_READ | PROT_WRITE;
      int flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE;
      void* space = mmap(NULL, mmap_size, prot, flags, -1, 0);  // map the requested amount of memory
      ...  // on failure, return
    
      // Stack is at the lower end of mapped space, stack guard region is at the lower end of stack.
      // Set the stack guard region to PROT_NONE, so we can detect thread stack overflow.
      if (mprotect(space, stack_guard_size, PROT_NONE) == -1) {  // make the low end (the start of the region) PROT_NONE so stack overflows fault
        // on failure, unmap and return
      }
      prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, space, stack_guard_size, "thread stack guard");  // name the PROT_NONE guard region's VMA
      return space;
    }
    

    So the actual layout of the region is:
    | thread stack guard | stack grows this way <----- thread stack ---- initial stack top | pthread_internal_t struct |
    At this point the memory the thread needs has been set up. A tiny demonstration of the guard-page trick follows; after that we return to pthread_create.
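    A tiny, self-contained demonstration of the guard-page idea (not bionic code; sizes are hard-coded for the sketch):

    #include <sys/mman.h>
    #include <cstdio>

    int main() {
      const size_t page = 4096;
      const size_t size = 16 * page;
      // Map a private anonymous region, then make its lowest page inaccessible.
      char* base = static_cast<char*>(mmap(nullptr, size, PROT_READ | PROT_WRITE,
                                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
      if (base == MAP_FAILED) { std::perror("mmap"); return 1; }
      if (mprotect(base, page, PROT_NONE) == -1) { std::perror("mprotect"); return 1; }

      base[page] = 1;        // fine: the first byte above the guard page
      // base[page - 1] = 1; // would SIGSEGV: this byte is inside the guard page
      std::puts("guard page armed");
      munmap(base, size);
      return 0;
    }
    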

    pthread_create(2)

    __BIONIC_WEAK_FOR_NATIVE_BRIDGE
    int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
                       void* (*start_routine)(void*), void* arg) {
      ...  // allocate and configure the memory; we just saw this part
    
      // Create a lock for the thread to wait on once it starts so we can keep
      // it from doing anything until after we notify the debugger about it
      //
      // This also provides the memory barrier we need to ensure that all
      // memory accesses previously performed by this thread are visible to
      // the new thread.
      thread->startup_handshake_lock.init(false);  // initialize a lock and immediately lock it
      thread->startup_handshake_lock.lock();
    
      thread->start_routine = start_routine;  // record the thread's entry point
      thread->start_routine_arg = arg;  // and the argument the entry function needs
    
      thread->set_cached_pid(getpid());  // cache the current process id
    
      int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM |
          CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;  // the clone flags
      void* tls = reinterpret_cast<void*>(thread->tls);
    
      ...  // extra work on x86; ignore it for now
    
      int rc = clone(__pthread_start, child_stack, flags, thread, &(thread->tid), tls, &(thread->tid));  // call clone to create the new thread
      if (rc == -1) {
        // clone failed; clean up
        return clone_errno;
      }
      ...
    }
    

    After initializing the thread struct and filling in its parameters, the code above calls clone to create the new thread. Note that despite the raw clone call this is still a thread, not a separate process: the flags make the new task share the address space and other resources with its creator.
    Next, let's look at the clone function.
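    Before reading bionic's wrapper, here is a minimal standalone use of the clone() wrapper. It uses a reduced flag set (CLONE_VM plus SIGCHLD rather than the full thread flags above) so the parent can simply waitpid for the child; the names are made up for the sketch:

    #ifndef _GNU_SOURCE
    #define _GNU_SOURCE 1
    #endif
    #include <sched.h>
    #include <sys/mman.h>
    #include <sys/wait.h>
    #include <csignal>
    #include <cstdio>

    static int child_fn(void* arg) {
      std::printf("child running, arg=%s\n", static_cast<const char*>(arg));
      return 0;
    }

    int main() {
      const size_t stack_size = 1024 * 1024;
      // The stack grows downward, so clone() wants the high end of the mapping.
      void* stack = mmap(nullptr, stack_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (stack == MAP_FAILED) { std::perror("mmap"); return 1; }
      void* stack_top = static_cast<char*>(stack) + stack_size;

      static char msg[] = "hello";
      int pid = clone(child_fn, stack_top, CLONE_VM | SIGCHLD, msg);
      if (pid == -1) { std::perror("clone"); return 1; }

      waitpid(pid, nullptr, 0);   // possible because we asked for SIGCHLD on child exit
      munmap(stack, stack_size);
      return 0;
    }
    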

    clone

    For reference, here are the flags passed in the call above:
    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID

    __BIONIC_WEAK_FOR_NATIVE_BRIDGE
    int clone(int (*fn)(void*), void* child_stack, int flags, void* arg, ...) {
      int* parent_tid = nullptr;
      void* new_tls = nullptr;
      int* child_tid = nullptr;
    
      if (fn != nullptr && child_stack == nullptr) {
        errno = EINVAL;
        return -1;
      }
    
      // Extract any optional parameters required by the flags.
      va_list args;
      va_start(args, arg);
      if ((flags & (CLONE_PARENT_SETTID|CLONE_SETTLS|CLONE_CHILD_SETTID|CLONE_CHILD_CLEARTID)) != 0) {
        parent_tid = va_arg(args, int*);
      }
      if ((flags & (CLONE_SETTLS|CLONE_CHILD_SETTID|CLONE_CHILD_CLEARTID)) != 0) {
        new_tls = va_arg(args, void*);
      }
      if ((flags & (CLONE_CHILD_SETTID|CLONE_CHILD_CLEARTID)) != 0) {
        child_tid = va_arg(args, int*);
      }
      va_end(args);  // the optional arguments were extracted above according to the flags
    
      // Align 'child_stack' to 16 bytes.
      uintptr_t child_stack_addr = reinterpret_cast<uintptr_t>(child_stack);  // the new thread's stack top
      child_stack_addr &= ~0xf;  // align it down to 16 bytes
      child_stack = reinterpret_cast<void*>(child_stack_addr);
    
      // the next two blocks cache the current thread's pid and tid, then temporarily invalidate them
      // Remember the parent pid and invalidate the cached value while we clone.
      pthread_internal_t* self = __get_thread();
      pid_t parent_pid = self->invalidate_cached_pid();
    
      // Remember the caller's tid so that it can be restored in the parent after clone.
      pid_t caller_tid = self->tid;
      // Invalidate the tid before the syscall. The value is lazily cached in gettid(),
      // and it will be updated by fork() and pthread_create(). We don't do this if
      // we are sharing address space with the child.
      if (!(flags & (CLONE_VM|CLONE_VFORK))) {
        self->tid = -1;
      }
    
      // Actually do the clone.
      int clone_result;
      if (fn != nullptr) {
        clone_result = __bionic_clone(flags, child_stack, parent_tid, new_tls, child_tid, fn, arg);  // clone the thread
      } else {
        ...
      }
    
      // restore the thread ids we invalidated above (in the parent)
      if (clone_result != 0) {
        // We're the parent, so put our known pid and tid back in place.
        // We leave the child without a cached pid and tid, but:
        // 1. pthread_create gives its children their own pthread_internal_t with the correct pid and tid.
        // 2. fork uses CLONE_CHILD_SETTID to get the new pid/tid.
        // 3. The tid is lazily fetched in gettid().
        // If any other cases become important, we could use a double trampoline like __pthread_start.
        self->set_cached_pid(parent_pid);
        self->tid = caller_tid;
      } else if (self->tid == -1) {
        self->tid = syscall(__NR_gettid);
        self->set_cached_pid(self->tid);
      }
    
      return clone_result;
    }
    

    We'll look at __bionic_clone and the underlying clone system call in the next article.
    Back to pthread_create:

    pthread_create(3)

    int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
                       void* (*start_routine)(void*), void* arg) {
      ...  // everything we saw above; the new thread has been created at this point
      int init_errno = __init_thread(thread);  // initialize the new thread's bookkeeping
      if (init_errno != 0) {  // failed, but the thread already exists, so we can't just discard it
         // Mark the thread detached and replace its start_routine with a no-op.
         // Letting the thread run is the easiest way to clean up its resources.
         atomic_store(&thread->join_state, THREAD_DETACHED);  // mark it detached
         __pthread_internal_add(thread);  // insert the new thread into the global thread list
         thread->start_routine = __do_nothing;  // so its entry point does nothing
         thread->startup_handshake_lock.unlock();  // release the handshake lock
         return init_errno;
      }
      // initialization succeeded; proceed normally
      // Publish the pthread_t and unlock the mutex to let the new thread start running.
      *thread_out = __pthread_internal_add(thread);
      thread->startup_handshake_lock.unlock();  // the creating thread is done; release the handshake lock
    
      return 0;
    }
    
    __init_thread
    int __init_thread(pthread_internal_t* thread) {
      thread->cleanup_stack = nullptr;
    
      if (__predict_true((thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) == 0)) {  // the join_state comes from the flags passed when the thread was created
        atomic_init(&thread->join_state, THREAD_NOT_JOINED);
      } else {
        atomic_init(&thread->join_state, THREAD_DETACHED);
      }
    
      // Set the scheduling policy/priority of the thread if necessary.
      bool need_set = true;
      int policy;
      sched_param param;
      if ((thread->attr.flags & PTHREAD_ATTR_FLAG_INHERIT) != 0) {  // should this thread inherit the caller's scheduling policy?
        // Unless the parent has SCHED_RESET_ON_FORK set, we've already inherited from the parent.
        policy = sched_getscheduler(0);  // the calling thread's scheduling policy
        need_set = ((policy & SCHED_RESET_ON_FORK) != 0);  // only need to set it explicitly if SCHED_RESET_ON_FORK is in effect
        if (need_set) {
          if (policy == -1) {
            async_safe_format_log(ANDROID_LOG_WARN, "libc",
                                  "pthread_create sched_getscheduler failed: %s", strerror(errno));
            return errno;
          }
          if (sched_getparam(0, &param) == -1) {
            async_safe_format_log(ANDROID_LOG_WARN, "libc",
                                  "pthread_create sched_getparam failed: %s", strerror(errno));
            return errno;
          }
        }
      } else {
        policy = thread->attr.sched_policy;  // use the policy and priority given in the attributes
        param.sched_priority = thread->attr.sched_priority;
      }
      // Backwards compatibility: before P, Android didn't have pthread_attr_setinheritsched,
      // and our behavior was neither of the POSIX behaviors.
      if ((thread->attr.flags & (PTHREAD_ATTR_FLAG_INHERIT|PTHREAD_ATTR_FLAG_EXPLICIT)) == 0) {  // neither inherit nor explicit was specified in the attributes
        need_set = (thread->attr.sched_policy != SCHED_NORMAL);  // only set it if a non-NORMAL policy was requested
      }
      if (need_set) {
        if (sched_setscheduler(thread->tid, policy, &param) == -1) {  // apply the scheduling policy to the new thread
          async_safe_format_log(ANDROID_LOG_WARN, "libc",
                                "pthread_create sched_setscheduler(%d, {%d}) call failed: %s", policy,
                                param.sched_priority, strerror(errno));
    #if defined(__LP64__)
          // For backwards compatibility reasons, we only report failures on 64-bit devices.
          return errno;
    #endif
        }
      }
    
      return 0;
    }
    
    __pthread_internal_add

    Adding the new thread to the global thread list:

    pthread_t __pthread_internal_add(pthread_internal_t* thread) {
      ScopedWriteLock locker(&g_thread_list_lock);
    
      // We insert at the head.
      thread->next = g_thread_list;
      thread->prev = nullptr;
      if (thread->next != nullptr) {
        thread->next->prev = thread;
      }
      g_thread_list = thread;
      return reinterpret_cast<pthread_t>(thread);
    }
    

    The new thread is inserted at the head of the list.
    That essentially completes pthread_create. Looking back, the start address passed to clone when creating the new thread was not the caller's start_routine but __pthread_start.
    Let's analyze that function.

    __pthread_start

    static int __pthread_start(void* arg) {
      pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(arg);
    
      // Wait for our creating thread to release us. This lets it have time to
      // notify gdb about this thread before we start doing anything.
      // This also provides the memory barrier needed to ensure that all memory
      // accesses previously made by the creating thread are visible to us.
      thread->startup_handshake_lock.lock();  // block until the creating thread finishes configuring us
    
      __init_alternate_signal_stack(thread);  // set up a dedicated stack for signal handling
    
      void* result = thread->start_routine(thread->start_routine_arg);  // call the entry function with its argument; for an ART thread this is the Thread::CreateCallback we saw earlier
      pthread_exit(result);  // exit the thread
    
      return 0;
    }
    
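    The startup_handshake_lock dance is worth pausing on: the creating thread locks it before clone, finishes configuring the pthread_internal_t, and only then unlocks; the child's very first action is to lock it, so it can never run ahead of its own setup. A minimal model of the same pattern with standard C++ primitives (not bionic's lock type):

    #include <cstdio>
    #include <mutex>
    #include <thread>

    int main() {
      std::mutex startup_handshake_lock;
      int shared_config = 0;

      startup_handshake_lock.lock();      // parent takes the lock before the child exists
      std::thread child([&] {
        startup_handshake_lock.lock();    // child blocks here until the parent is done
        std::printf("child sees config = %d\n", shared_config);
        startup_handshake_lock.unlock();
      });

      shared_config = 42;                 // finish configuring what the child will read
      startup_handshake_lock.unlock();    // release the child

      child.join();
      return 0;
    }
    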
    __init_alternate_signal_stack
    void __init_alternate_signal_stack(pthread_internal_t* thread) {
      // Create and set an alternate signal stack.
      void* stack_base = mmap(NULL, SIGNAL_STACK_SIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);  // map a region of memory
      if (stack_base != MAP_FAILED) {
        // Create a guard to catch stack overflows in signal handlers.
        if (mprotect(stack_base, PTHREAD_GUARD_SIZE, PROT_NONE) == -1) {  // set up a guard region to catch overflows of the signal stack
          munmap(stack_base, SIGNAL_STACK_SIZE);
          return;
        }
        stack_t ss;
        ss.ss_sp = reinterpret_cast<uint8_t*>(stack_base) + PTHREAD_GUARD_SIZE;
        ss.ss_size = SIGNAL_STACK_SIZE - PTHREAD_GUARD_SIZE;
        ss.ss_flags = 0;
        sigaltstack(&ss, NULL);
        thread->alternate_signal_stack = stack_base;  // the stack was installed with sigaltstack above; remember its base on the thread
    
        // We can only use const static allocated string for mapped region name, as Android kernel
        // uses the string pointer directly when dumping /proc/pid/maps.
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ss.ss_sp, ss.ss_size, "thread signal stack");  // name the anonymous mappings
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, stack_base, PTHREAD_GUARD_SIZE, "thread signal stack guard");
      }
    }
    
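    Why bother with a separate signal stack? If a signal handler had to run on the normal thread stack, it could not run at all when the signal was caused by that very stack overflowing into the guard page. A minimal, self-contained use of the same mechanism (fixed sizes chosen for the sketch):

    #include <csignal>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      // Install an alternate stack for this thread.
      static char alt_stack[64 * 1024];
      stack_t ss{};
      ss.ss_sp = alt_stack;
      ss.ss_size = sizeof(alt_stack);
      ss.ss_flags = 0;
      if (sigaltstack(&ss, nullptr) == -1) { std::perror("sigaltstack"); return 1; }

      // Handlers registered with SA_ONSTACK run on that alternate stack,
      // so they still work when the normal stack is exhausted.
      struct sigaction sa{};
      sa.sa_handler = [](int) { std::_Exit(2); };  // keep the handler async-signal-safe
      sa.sa_flags = SA_ONSTACK;
      sigaction(SIGSEGV, &sa, nullptr);

      std::puts("alternate signal stack installed");
      return 0;
    }
    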

    We'll look at how a thread exits later. At this point we've covered pretty much the whole user-space path of Thread.start, from the Java layer down through native code. The next article will briefly look at the kernel-side implementation of the clone call a thread makes, and how it differs from creating a process.
