The implementation of pthread_mutex_lock

Date: 2022-01-31 14:09:56

After searching for a long time, I finally found the implementation of the pthread_mutex_lock function; it is pasted below.

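This is the glibc NPTL implementation (nptl/pthread_mutex_lock.c, from an older glibc release). The function is one big switch over the mutex kind: plain and error-checking mutexes go through the futex-based low-level lock, recursive mutexes bump a per-owner counter, adaptive mutexes spin briefly before sleeping, robust mutexes cope with owners that died, and the PI/PP variants implement the POSIX priority-inheritance and priority-ceiling protocols. Note that the code relies on glibc-internal headers and macros (THREAD_GETMEM, LLL_MUTEX_LOCK, INTERNAL_SYSCALL, ...), so it cannot be compiled outside the glibc source tree.
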
/* glibc-internal headers.  */
#include <assert.h>
#include <errno.h>
#include "pthreadP.h"
#include <lowlevellock.h>

int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  int oldval;
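  /* Threads are identified by their kernel thread id (TID); it is what
     gets stored in the __owner field and, for robust/PI mutexes, in the
     lock word itself.  */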
  pid_t id = THREAD_GETMEM (THREAD_SELF, tid);

  int retval = 0;
  switch (__builtin_expect (mutex->__data.__kind, PTHREAD_MUTEX_TIMED_NP))
    {
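      /* Dispatch on the mutex kind.  The __builtin_expect above hints
         that PTHREAD_MUTEX_TIMED_NP, the default kind, is the common
         case.  */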
      /* Recursive mutex.  */
    case PTHREAD_MUTEX_RECURSIVE_NP:
      /* Check whether we already hold the mutex.  */
      if (mutex->__data.__owner == id)
        {
          /* Just bump the counter.  */
          if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
            /* Overflow of the counter.  */
            return EAGAIN;

          ++mutex->__data.__count;

          return 0;
        }

      /* We have to get the mutex.  */
      LLL_MUTEX_LOCK (mutex->__data.__lock);

      assert (mutex->__data.__owner == 0);
      mutex->__data.__count = 1;
      break;

      /* Error checking mutex.  */
    case PTHREAD_MUTEX_ERRORCHECK_NP:
      /* Check whether we already hold the mutex.  */
      if (__builtin_expect (mutex->__data.__owner == id, 0))
        return EDEADLK;

      /* FALLTHROUGH */

    case PTHREAD_MUTEX_TIMED_NP:
    simple:
      /* Normal mutex.  */
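      /* LLL_MUTEX_LOCK is NPTL's futex-based low-level lock: a single
         atomic instruction takes the lock when the lock word is free
         (typically 0 = free, 1 = locked, 2 = locked with waiters), and
         only on contention does it issue a futex wait system call.  */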
      LLL_MUTEX_LOCK (mutex->__data.__lock);
      assert (mutex->__data.__owner == 0);
      break;

    case PTHREAD_MUTEX_ADAPTIVE_NP:
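      /* Spin a bounded number of times in user space before sleeping,
         on the theory that the holder will release the lock soon; the
         bound adapts to the contention history stored in __spins.
         Spinning cannot help on a uniprocessor, so fall back to the
         plain path there.  */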
      if (! __is_smp)
        goto simple;

      if (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0)
        {
          int cnt = 0;
          int max_cnt = MIN (MAX_ADAPTIVE_COUNT,
                             mutex->__data.__spins * 2 + 10);
          do
            {
              if (cnt++ >= max_cnt)
                {
                  LLL_MUTEX_LOCK (mutex->__data.__lock);
                  break;
                }

#ifdef BUSY_WAIT_NOP
              BUSY_WAIT_NOP;
#endif
            }
          while (LLL_MUTEX_TRYLOCK (mutex->__data.__lock) != 0);

          mutex->__data.__spins += (cnt - mutex->__data.__spins) / 8;
        }
      assert (mutex->__data.__owner == 0);
      break;

    case PTHREAD_MUTEX_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_ROBUST_ADAPTIVE_NP:
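      /* Robust mutexes remain usable after their owner dies.  Each
         thread keeps a kernel-registered list of the robust mutexes it
         holds (see set_robust_list(2)); list_op_pending records the
         mutex we are about to take so that recovery works even if this
         thread dies in the middle of the acquisition.  */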
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                     &mutex->__data.__list.__next);

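      /* Retry until the mutex is acquired from a live owner.  A set
         FUTEX_OWNER_DIED bit in the lock word means the previous owner
         died while holding the mutex; in that case we take the lock
         over and report EOWNERDEAD.  */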
      oldval = mutex->__data.__lock;
      do
        {
        again:
          if ((oldval & FUTEX_OWNER_DIED) != 0)
            {
              /* The previous owner died.  Try locking the mutex.  */
              int newval = id;
#ifdef NO_INCR
              newval |= FUTEX_WAITERS;
#else
              newval |= (oldval & FUTEX_WAITERS);
#endif

              newval
                = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                       newval, oldval);

              if (newval != oldval)
                {
                  oldval = newval;
                  goto again;
                }

              /* We got the mutex.  */
              mutex->__data.__count = 1;
              /* But it is inconsistent unless marked otherwise.  */
              mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

              ENQUEUE_MUTEX (mutex);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

              /* Note that we deliberately exit here.  If we fall
                 through to the end of the function __nusers would be
                 incremented which is not correct because the old
                 owner has to be discounted.  If we are not supposed
                 to increment __nusers we actually have to decrement
                 it here.  */
#ifdef NO_INCR
              --mutex->__data.__nusers;
#endif

              return EOWNERDEAD;
            }

          /* Check whether we already hold the mutex.  */
          if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
            {
              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_ERRORCHECK_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);
                  return EDEADLK;
                }

              if (mutex->__data.__kind
                  == PTHREAD_MUTEX_ROBUST_RECURSIVE_NP)
                {
                  THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                                 NULL);

                  /* Just bump the counter.  */
                  if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                    /* Overflow of the counter.  */
                    return EAGAIN;

                  ++mutex->__data.__count;

                  return 0;
                }
            }

          oldval = LLL_ROBUST_MUTEX_LOCK (mutex->__data.__lock, id);

          if (__builtin_expect (mutex->__data.__owner
                                == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
            {
              /* This mutex is now not recoverable.  */
              mutex->__data.__count = 0;
              lll_mutex_unlock (mutex->__data.__lock);
              THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
              return ENOTRECOVERABLE;
            }
        }
      while ((oldval & FUTEX_OWNER_DIED) != 0);

      mutex->__data.__count = 1;
      ENQUEUE_MUTEX (mutex);
      THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
      break;

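      /* Priority-inheritance mutexes.  Here the lock word holds the
         owner's TID, so only an uncontended acquisition can be done in
         user space; contended cases go to the kernel via FUTEX_LOCK_PI,
         which lends the waiters' priority to the current owner.  */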
    case PTHREAD_MUTEX_PI_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ADAPTIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_RECURSIVE_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PI_ROBUST_NORMAL_NP:
    case PTHREAD_MUTEX_PI_ROBUST_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;
        int robust = mutex->__data.__kind & PTHREAD_MUTEX_ROBUST_NORMAL_NP;

        if (robust)
          /* Note: robust PI futexes are signaled by setting bit 0.  */
          THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending,
                         (void *) (((uintptr_t) &mutex->__data.__list.__next)
                                   | 1));

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (__builtin_expect ((oldval & FUTEX_TID_MASK) == id, 0))
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
                return EDEADLK;
              }

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int newval = id;
#ifdef NO_INCR
        newval |= FUTEX_WAITERS;
#endif
        oldval = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                      newval, 0);

        if (oldval != 0)
          {
            /* The mutex is locked.  The kernel will now take care of
               everything.  */
            INTERNAL_SYSCALL_DECL (__err);
            int e = INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                                      FUTEX_LOCK_PI, 1, 0);

            if (INTERNAL_SYSCALL_ERROR_P (e, __err)
                && (INTERNAL_SYSCALL_ERRNO (e, __err) == ESRCH
                    || INTERNAL_SYSCALL_ERRNO (e, __err) == EDEADLK))
              {
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != EDEADLK
                        || (kind != PTHREAD_MUTEX_ERRORCHECK_NP
                            && kind != PTHREAD_MUTEX_RECURSIVE_NP));
                /* ESRCH can happen only for non-robust PI mutexes where
                   the owner of the lock died.  */
                assert (INTERNAL_SYSCALL_ERRNO (e, __err) != ESRCH || !robust);

                /* Delay the thread indefinitely.  */
                while (1)
                  pause_not_cancel ();
              }

            oldval = mutex->__data.__lock;

            assert (robust || (oldval & FUTEX_OWNER_DIED) == 0);
          }

        if (__builtin_expect (oldval & FUTEX_OWNER_DIED, 0))
          {
            atomic_and (&mutex->__data.__lock, ~FUTEX_OWNER_DIED);

            /* We got the mutex.  */
            mutex->__data.__count = 1;
            /* But it is inconsistent unless marked otherwise.  */
            mutex->__data.__owner = PTHREAD_MUTEX_INCONSISTENT;

            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);

            /* Note that we deliberately exit here.  If we fall
               through to the end of the function __nusers would be
               incremented which is not correct because the old owner
               has to be discounted.  If we are not supposed to
               increment __nusers we actually have to decrement it here.  */
#ifdef NO_INCR
            --mutex->__data.__nusers;
#endif

            return EOWNERDEAD;
          }

        if (robust
            && __builtin_expect (mutex->__data.__owner
                                 == PTHREAD_MUTEX_NOTRECOVERABLE, 0))
          {
            /* This mutex is now not recoverable.  */
            mutex->__data.__count = 0;

            INTERNAL_SYSCALL_DECL (__err);
            INTERNAL_SYSCALL (futex, __err, 4, &mutex->__data.__lock,
                              FUTEX_UNLOCK_PI, 0, 0);

            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
            return ENOTRECOVERABLE;
          }

        mutex->__data.__count = 1;
        if (robust)
          {
            ENQUEUE_MUTEX_PI (mutex);
            THREAD_SETMEM (THREAD_SELF, robust_head.list_op_pending, NULL);
          }
      }
      break;

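      /* Priority-protection (priority-ceiling) mutexes.  The ceiling
         lives in the upper bits of the lock word and the lock state in
         the low two bits (ceilval | 1 = locked, ceilval | 2 = locked
         with waiters); the thread first raises its own priority to the
         ceiling via __pthread_tpp_change_priority.  */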
    case PTHREAD_MUTEX_PP_RECURSIVE_NP:
    case PTHREAD_MUTEX_PP_ERRORCHECK_NP:
    case PTHREAD_MUTEX_PP_NORMAL_NP:
    case PTHREAD_MUTEX_PP_ADAPTIVE_NP:
      {
        int kind = mutex->__data.__kind & PTHREAD_MUTEX_KIND_MASK_NP;

        oldval = mutex->__data.__lock;

        /* Check whether we already hold the mutex.  */
        if (mutex->__data.__owner == id)
          {
            if (kind == PTHREAD_MUTEX_ERRORCHECK_NP)
              return EDEADLK;

            if (kind == PTHREAD_MUTEX_RECURSIVE_NP)
              {
                /* Just bump the counter.  */
                if (__builtin_expect (mutex->__data.__count + 1 == 0, 0))
                  /* Overflow of the counter.  */
                  return EAGAIN;

                ++mutex->__data.__count;

                return 0;
              }
          }

        int oldprio = -1, ceilval;
        do
          {
            int ceiling = (oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK)
                          >> PTHREAD_MUTEX_PRIO_CEILING_SHIFT;

            if (__pthread_current_priority () > ceiling)
              {
                if (oldprio != -1)
                  __pthread_tpp_change_priority (oldprio, -1);
                return EINVAL;
              }

            retval = __pthread_tpp_change_priority (oldprio, ceiling);
            if (retval)
              return retval;

            ceilval = ceiling << PTHREAD_MUTEX_PRIO_CEILING_SHIFT;
            oldprio = ceiling;

            oldval
              = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
#ifdef NO_INCR
                                                     ceilval | 2,
#else
                                                     ceilval | 1,
#endif
                                                     ceilval);

            if (oldval == ceilval)
              break;

            do
              {
                oldval
                  = atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                         ceilval | 2,
                                                         ceilval | 1);

                if ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval)
                  break;

                if (oldval != ceilval)
                  lll_futex_wait (&mutex->__data.__lock, ceilval | 2);
              }
            while (atomic_compare_and_exchange_val_acq (&mutex->__data.__lock,
                                                        ceilval | 2, ceilval)
                   != ceilval);
          }
        while ((oldval & PTHREAD_MUTEX_PRIO_CEILING_MASK) != ceilval);

        assert (mutex->__data.__owner == 0);
        mutex->__data.__count = 1;
      }
      break;

    default:
      /* Correct code cannot set any other type.  */
      return EINVAL;
    }

  /* Record the ownership.  */
  mutex->__data.__owner = id;
#ifndef NO_INCR
  ++mutex->__data.__nusers;
#endif

  return retval;
}
#ifndef __pthread_mutex_lock
strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
strong_alias (__pthread_mutex_lock, __pthread_mutex_lock_internal)
#endif
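
To watch the robust branch above from the user side, here is a minimal sketch of my own (not part of glibc; it uses the modern POSIX robust-mutex API, available since glibc 2.12): a thread exits while holding a robust mutex, and the next pthread_mutex_lock returns EOWNERDEAD instead of blocking forever, matching the FUTEX_OWNER_DIED branch above.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m;

/* Take the robust mutex and terminate without unlocking it,
   simulating an owner that dies while holding the lock.  */
static void *
die_with_lock (void *arg)
{
  pthread_mutex_lock (&m);
  return NULL;
}

int
main (void)
{
  pthread_mutexattr_t a;
  pthread_mutexattr_init (&a);
  pthread_mutexattr_setrobust (&a, PTHREAD_MUTEX_ROBUST);
  pthread_mutex_init (&m, &a);
  pthread_mutexattr_destroy (&a);

  pthread_t t;
  pthread_create (&t, NULL, die_with_lock, NULL);
  pthread_join (t, NULL);

  /* The owner is dead: we still get the lock, but EOWNERDEAD tells
     us the protected data may be inconsistent.  */
  int r = pthread_mutex_lock (&m);
  printf ("pthread_mutex_lock -> %d%s\n",
          r, r == EOWNERDEAD ? " (EOWNERDEAD)" : "");
  if (r == EOWNERDEAD)
    /* After repairing the protected state, mark the mutex usable.  */
    pthread_mutex_consistent (&m);

  pthread_mutex_unlock (&m);
  return 0;
}

Build with gcc -pthread. An error-checking mutex (PTHREAD_MUTEX_ERRORCHECK) can be exercised the same way: locking it twice from one thread returns the EDEADLK seen in the error-checking branch.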