| /* |
| * Copyright (c) 2012 The Native Client Authors. All rights reserved. |
| * Use of this source code is governed by a BSD-style license that can be |
| * found in the LICENSE file. |
| */ |
| |
| /* |
| * Testing suite for NativeClient threads |
| */ |
| |
| #include <errno.h> |
| #include <limits.h> |
| #include <pthread.h> |
| #include <semaphore.h> |
| #include <stdint.h> |
| #include <stdio.h> |
| #include <stdlib.h> |
| #include <string.h> |
| #include <sys/time.h> |
| #include <time.h> |
| #include <unistd.h> |
| #include "gtest/gtest.h" |
| |
| #define ARRAY_SIZE(x) (sizeof (x) / sizeof (x)[0]) |
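/*
 * TestCondvarTimeout waits for TIMEOUT_TIME_NS but accepts a measured
 * elapsed time up to TIMEOUT_EARLY_MS short of that, to absorb timer and
 * clock rounding between the clock_gettime() samples.
 */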
| #define TIMEOUT_TIME_NS (500 * 1000 * 1000) /* 500 ms in ns */ |
| #define TIMEOUT_EARLY_MS (18) |
| #define TIMEOUT_CHECK_NS (TIMEOUT_TIME_NS - TIMEOUT_EARLY_MS * 1000ull * 1000) |
| |
| |
// TSan is not currently run on this test, so the ANNOTATE_* macros are
// stubbed out. GCC does not define __has_feature, so provide a fallback
// before testing for the thread_sanitizer feature.
#ifndef __has_feature
#define __has_feature(x) 0
#endif
#if !__has_feature(thread_sanitizer)
#define ANNOTATE_HAPPENS_BEFORE(addr)
#define ANNOTATE_HAPPENS_AFTER(addr)
#define ANNOTATE_IGNORE_WRITES_BEGIN()
#define ANNOTATE_IGNORE_WRITES_END()
#endif
| |
| int g_num_test_loops = 500; |
| |
/* Macro so we can use it as an array dimension in ISO C90 */
| #define NUM_THREADS 10 |
| |
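/* Thread-local: every thread starts with its own copy, initialized to 5. */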
| __thread int tls_var = 5; |
| |
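/* Set by TlsThread under SYNC_DATA.mutex to signal that it has run. */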
| int g_ready = 0; |
| |
| int g_errors = 0; |
| |
| int g_verbose = 0; |
| |
| #define PRINT(cond, mesg) do { if (cond) { \ |
| printf("%s:%d:%d: ", \ |
| __FUNCTION__, \ |
| __LINE__, \ |
| (int)pthread_self()); \ |
| printf mesg; \ |
| fflush(stdout); \ |
| }\ |
| } while (0) |
| |
| #define PRINT_ERROR do { PRINT(1, ("Error\n")); g_errors++; } while (0) |
| |
| |
| struct SYNC_DATA { |
| pthread_mutex_t mutex; |
| pthread_cond_t cv; |
| }; |
| |
| namespace { |
| |
| class ThreadTests : public ::testing::Test { |
| protected: |
| ThreadTests() { |
| // You can do set-up work for each test here. |
| } |
| |
| ~ThreadTests() override { |
| } |
| |
| |
| void SetUp() override { |
| } |
| |
| void TearDown() override { |
| } |
| }; |
| |
}  // namespace
| |
| |
| typedef void* (*ThreadFunction)(void *state); |
| |
| |
| void* FastThread(void *userdata) { |
| /* do nothing and immediately exit */ |
| return 0; |
| } |
| |
/*
 * Dispatches to pthread_create(), busy-looping for a large but finite
 * number of attempts to get past transient EAGAIN failures.
 */
| |
| int pthread_create_check_eagain(pthread_t *thread_id, |
| pthread_attr_t *attr, |
| void *(*func) (void *), |
| void *state) { |
| int64_t loop_c = 0; |
| int p = 0; |
| |
| while (EAGAIN == (p = pthread_create(thread_id, attr, func, state))) { |
| /* Busyloop. The comparison slows things down a little. */ |
| /* The 6000 is an arbitrary cut-off point for the busyloop */ |
| EXPECT_LE(loop_c, 6000); |
| loop_c++; |
| } |
| |
| return p; |
| } |
| |
/* Creates a thread and waits for it to exit via pthread_join(). */
| void CreateWithJoin(ThreadFunction func, void *state) { |
| pthread_t thread_id; |
| void* thread_ret; |
| ASSERT_EQ(0, pthread_create_check_eagain(&thread_id, NULL, func, state)); |
| /* wait for thread to exit */ |
| ASSERT_EQ(0, pthread_join(thread_id, &thread_ret)); |
| } |
| |
| |
/* Creates a detached thread; detached threads cannot be joined. */
| void CreateDetached(void) { |
| pthread_t thread_id; |
| pthread_attr_t attr; |
| ASSERT_EQ(0, pthread_attr_init(&attr)); |
| ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); |
  ASSERT_EQ(0, pthread_create_check_eagain(&thread_id, &attr,
                                           FastThread, NULL));
| /* cannot join on detached thread */ |
| } |
| |
| |
| void* TlsThread(void* state) { |
| struct SYNC_DATA* sync_data = (struct SYNC_DATA*)state; |
| PRINT(g_verbose, ("start signal thread: %d\n", tls_var)); |
| EXPECT_EQ(0, pthread_mutex_lock(&sync_data->mutex)); |
| tls_var = 8; |
| g_ready = 1; |
| EXPECT_EQ(0, pthread_cond_signal(&sync_data->cv)); |
| EXPECT_EQ(0, pthread_mutex_unlock(&sync_data->mutex)); |
| PRINT(g_verbose, ("terminate signal thread\n")); |
| return (void*)33; |
| } |
| |
| |
| TEST_F(ThreadTests, TestTlsAndSync) { |
| pthread_t thread_id; |
| pthread_attr_t attr; |
| struct SYNC_DATA sync_data; |
| |
| ASSERT_EQ(0, pthread_mutex_init(&sync_data.mutex, NULL)); |
| ASSERT_EQ(0, pthread_cond_init(&sync_data.cv, NULL)); |
| ASSERT_EQ(0, pthread_attr_init(&attr)); |
| ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); |
| |
| ASSERT_EQ(0, pthread_create_check_eagain(&thread_id, &attr, |
| TlsThread, &sync_data)); |
| |
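  /* The child thread sets its own tls_var to 8; our copy must stay 5. */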
| EXPECT_EQ(5, tls_var); |
| ASSERT_EQ(0, pthread_mutex_lock(&sync_data.mutex)); |
| |
| while (!g_ready) { |
| ASSERT_EQ(0, pthread_cond_wait(&sync_data.cv, &sync_data.mutex)); |
| } |
| |
| EXPECT_EQ(5, tls_var); |
| |
| ASSERT_EQ(0, pthread_mutex_unlock(&sync_data.mutex)); |
| EXPECT_EQ(5, tls_var); |
| } |
| |
| |
| TEST_F(ThreadTests, TestManyThreadsJoinable) { |
| int i; |
| for (i = 0; i < g_num_test_loops; i++) { |
| if (i % (g_num_test_loops / 10) == 0) { |
| PRINT(g_verbose, ("round %d\n", i)); |
| } |
| CreateWithJoin(FastThread, NULL); |
| } |
| } |
| |
| |
| TEST_F(ThreadTests, TestManyThreadsDetached) { |
| int i; |
| for (i = 0; i < g_num_test_loops; i++) { |
| if (i % (g_num_test_loops / 10) == 0) { |
| PRINT(g_verbose, ("round %d\n", i)); |
| } |
| CreateDetached(); |
| } |
| } |
| |
| |
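/*
 * Ping-pong partner for TestSemaphores: waits on sem[0] and posts sem[1],
 * g_num_test_loops times.
 */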
| void* SemaphoresThread(void *state) { |
| sem_t* sem = (sem_t*) state; |
| int i = 0, rv; |
| for (i = 0; i < g_num_test_loops; i++) { |
| rv = sem_wait(&sem[0]); |
| EXPECT_EQ(0, rv); |
| rv = sem_post(&sem[1]); |
| EXPECT_EQ(0, rv); |
| } |
| EXPECT_EQ(g_num_test_loops, i); |
| return 0; |
| } |
| |
| TEST_F(ThreadTests, TestSemaphores) { |
| int i; |
| int rv; |
| pthread_t thread_id; |
| pthread_attr_t attr; |
| sem_t sem[2]; |
  ASSERT_EQ(0, sem_init(&sem[0], 0, 0));
  ASSERT_EQ(0, sem_init(&sem[1], 0, 0));
| |
| ASSERT_EQ(0, pthread_attr_init(&attr)); |
| ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); |
| |
| ASSERT_EQ(0, pthread_create_check_eagain(&thread_id, &attr, |
| SemaphoresThread, sem)); |
| |
| for (i = 0; i < g_num_test_loops; i++) { |
| if (i % (g_num_test_loops / 10) == 0) { |
| PRINT(g_verbose, ("round %d\n", i)); |
| } |
| rv = sem_post(&sem[0]); |
| EXPECT_EQ(0, rv); |
| rv = sem_wait(&sem[1]); |
| EXPECT_EQ(0, rv); |
| } |
  EXPECT_EQ(0, sem_destroy(&sem[0]));
  EXPECT_EQ(0, sem_destroy(&sem[1]));
| } |
| |
| TEST_F(ThreadTests, TestSemaphoreInitDestroy) { |
| sem_t sem; |
| int rv; |
| |
  rv = sem_init(&sem, 0, (unsigned) SEM_VALUE_MAX + 1);
  EXPECT_EQ(-1, rv); /* a value above SEM_VALUE_MAX must be rejected */
| |
| rv = sem_init(&sem, 0, SEM_VALUE_MAX); |
| EXPECT_EQ(0, rv); /* success */ |
| rv = sem_destroy(&sem); |
| EXPECT_EQ(0, rv); |
| |
| rv = sem_init(&sem, 0, 0); |
| EXPECT_EQ(0, rv); /* success */ |
  rv = sem_destroy(&sem);
  EXPECT_EQ(0, rv);
}
| |
| TEST_F(ThreadTests, TestTryLockReturnValue) { |
| pthread_mutex_t mutex; |
| int rv; |
| |
| ASSERT_EQ(0, pthread_mutex_init(&mutex, NULL)); |
| ASSERT_EQ(0, pthread_mutex_lock(&mutex)); |
  rv = pthread_mutex_trylock(&mutex);

  EXPECT_EQ(EBUSY, rv);

  ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
  ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
}
| |
| TEST_F(ThreadTests, TestDoubleUnlockReturnValue) { |
| pthread_mutex_t mutex; |
| int rv; |
| |
| /* |
| * Calling pthread_mutex_unlock on an unlocked mutex is actually |
| * undefined behavior under POSIX unless it's an ERRORCHECK mutex. |
| */ |
| pthread_mutexattr_t attr; |
| ASSERT_EQ(0, pthread_mutexattr_init(&attr)); |
| ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK)); |
| ASSERT_EQ(0, pthread_mutex_init(&mutex, &attr)); |
| |
  ASSERT_EQ(0, pthread_mutex_lock(&mutex));
  ASSERT_EQ(0, pthread_mutex_unlock(&mutex));
  rv = pthread_mutex_unlock(&mutex);

  EXPECT_EQ(EPERM, rv);

  ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}
| |
| TEST_F(ThreadTests, TestUnlockUninitializedReturnValue) { |
| pthread_mutex_t mutex; |
| int rv; |
| |
| /* |
| * Calling pthread_mutex_unlock on an unlocked mutex is actually |
| * undefined behavior under POSIX unless it's an ERRORCHECK mutex. |
| */ |
| pthread_mutexattr_t attr; |
| ASSERT_EQ(0, pthread_mutexattr_init(&attr)); |
| ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK)); |
| ASSERT_EQ(0, pthread_mutex_init(&mutex, &attr)); |
| |
  rv = pthread_mutex_unlock(&mutex);

  EXPECT_EQ(EPERM, rv);

  ASSERT_EQ(0, pthread_mutex_destroy(&mutex));
  ASSERT_EQ(0, pthread_mutexattr_destroy(&attr));
}
| |
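/* Shared pthread_once() state used by all OnceThread instances. */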
| pthread_once_t once_control = PTHREAD_ONCE_INIT; |
| |
typedef int32_t AtomicInt32;
| |
void pthread_once_routine(void) {
  static AtomicInt32 count = 0;
  AtomicInt32 res = __sync_fetch_and_add(&count, 1);
  /* pthread_once() must run the init routine exactly once. */
  EXPECT_EQ(0, res);
}
| |
| void* OnceThread(void *userdata) { |
| EXPECT_EQ(0, pthread_once(&once_control, pthread_once_routine)); |
| return 0; |
| } |
| |
| |
| TEST_F(ThreadTests, TestPthreadOnce) { |
| int i; |
| PRINT(g_verbose, ("creating %d threads\n", g_num_test_loops)); |
| for (i = 0; i < g_num_test_loops; i++) { |
| pthread_t thread_id; |
| pthread_attr_t attr; |
| if (i % (g_num_test_loops / 10) == 0) { |
| PRINT(g_verbose, ("round %d\n", i)); |
| } |
| ASSERT_EQ(0, pthread_attr_init(&attr)); |
| ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); |
    ASSERT_EQ(0, pthread_create_check_eagain(&thread_id, &attr,
                                             OnceThread, NULL));
| } |
| } |
| |
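/*
 * Locks the recursive mutex g_num_test_loops times, then unlocks it the
 * same number of times; every call must succeed.
 */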
| void* RecursiveLockThread(void *state) { |
| int i; |
| pthread_mutex_t *lock = (pthread_mutex_t *)state; |
| |
| for (i = 0; i < g_num_test_loops; ++i) { |
| EXPECT_EQ(0, pthread_mutex_lock(lock)); |
| } |
| |
| for (i = 0; i < g_num_test_loops; ++i) { |
| EXPECT_EQ(0, pthread_mutex_unlock(lock)); |
| } |
| |
| return 0; |
| } |
| |
| TEST_F(ThreadTests, TestRecursiveMutex) { |
| pthread_mutexattr_t attr; |
| pthread_mutex_t mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP; |
| pthread_t tid[NUM_THREADS]; |
| int i = 0; |
| |
| PRINT(g_verbose, ("starting threads\n")); |
| for (i = 0; i < NUM_THREADS; ++i) { |
| ASSERT_EQ(0, pthread_create_check_eagain(&tid[i], NULL, |
| RecursiveLockThread, &mutex)); |
| } |
| |
| PRINT(g_verbose, ("joining threads\n")); |
| for (i = 0; i < NUM_THREADS; ++i) { |
| ASSERT_EQ(0, pthread_join(tid[i], NULL)); |
| } |
| |
| PRINT(g_verbose, ("checking\n")); |
| ASSERT_EQ(0, pthread_mutex_lock(&mutex)); |
| ASSERT_EQ(0, pthread_mutex_trylock(&mutex)); |
| ASSERT_EQ(0, pthread_mutex_unlock(&mutex)); |
| ASSERT_EQ(0, pthread_mutex_unlock(&mutex)); |
| |
| ASSERT_EQ(0, pthread_mutex_destroy(&mutex)); |
| memset(&mutex, 0, sizeof(mutex)); |
| |
| ASSERT_EQ(0, pthread_mutexattr_init(&attr)); |
| ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE_NP)); |
| ASSERT_EQ(0, pthread_mutex_init(&mutex, &attr)); |
| |
| PRINT(g_verbose, ("starting threads\n")); |
| for (i = 0; i < NUM_THREADS; ++i) { |
| ASSERT_EQ(0, pthread_create_check_eagain(&tid[i], NULL, |
| RecursiveLockThread, &mutex)); |
| } |
| |
| PRINT(g_verbose, ("joining threads\n")); |
| for (i = 0; i < NUM_THREADS; ++i) { |
| ASSERT_EQ(0, pthread_join(tid[i], NULL)); |
| } |
| } |
| |
| |
| TEST_F(ThreadTests, TestErrorCheckingMutex) { |
| pthread_mutexattr_t attr; |
| pthread_mutex_t mutex; |
| int rv; |
| |
| ASSERT_EQ(0, pthread_mutexattr_init(&attr)); |
| ASSERT_EQ(0, pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK_NP)); |
| ASSERT_EQ(0, pthread_mutex_init(&mutex, &attr)); |
| |
| rv = pthread_mutex_unlock(&mutex); |
| EXPECT_NE(0, rv); |
| |
| ASSERT_EQ(0, pthread_mutex_lock(&mutex)); |
| |
| rv = pthread_mutex_trylock(&mutex); |
| EXPECT_NE(0, rv); |
| |
| ASSERT_EQ(0, pthread_mutex_unlock(&mutex)); |
| |
| rv = pthread_mutex_unlock(&mutex); |
| EXPECT_NE(0, rv); |
| } |
| |
| |
| void tsd_destructor(void *arg) { |
| *(int*)arg += 1; |
| } |
| |
| pthread_key_t tsd_key; |
| |
| |
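/*
 * Stores |state| in tsd_key; tsd_destructor then runs at thread exit and
 * increments the int that |state| points to.
 */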
| void* TsdThread(void *state) { |
| EXPECT_EQ(0, pthread_setspecific(tsd_key, state)); |
| |
| return 0; |
| } |
| |
| |
| TEST_F(ThreadTests, TestTSD) { |
| int rv; |
| void* ptr; |
| int destructor_count = 0; |
| ASSERT_EQ(0, pthread_key_create(&tsd_key, tsd_destructor)); |
| |
| ASSERT_EQ(0, pthread_setspecific(tsd_key, &rv)); |
| |
| ptr = pthread_getspecific(tsd_key); |
| EXPECT_EQ(ptr, &rv); |
| |
| CreateWithJoin(TsdThread, &destructor_count); |
| EXPECT_EQ(1, destructor_count); |
| |
  ASSERT_EQ(0, pthread_key_delete(tsd_key));
}
| |
| |
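/* Terminates via pthread_exit(); the exit status is checked by the joiner. */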
| void *PthreadExitThread(void *unused) { |
| pthread_exit((void *) 1234); |
| /* Should not reach here. */ |
| abort(); |
| return NULL; |
| } |
| |
| TEST_F(ThreadTests, TestPthreadExit) { |
| pthread_t tid; |
| void *result; |
| |
  ASSERT_EQ(0, pthread_create_check_eagain(&tid, NULL,
                                           PthreadExitThread, NULL));
| ASSERT_EQ(0, pthread_join(tid, &result)); |
| EXPECT_EQ(result, (void *) 1234); |
| } |
| |
| |
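/*
 * Stresses concurrent small allocations; returns the final block so the
 * joining thread can free it.
 */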
void* MallocSmallThread(void *userdata) {
  void* ptr = NULL;
  int i;
  for (i = 0; i < g_num_test_loops; ++i) {
    free(ptr);  /* Free the previous block so the loop does not leak. */
    ptr = malloc(16);
    EXPECT_NE(nullptr, ptr);
  }
  return ptr;
}
| |
| |
| TEST_F(ThreadTests, TestMallocSmall) { |
| int i = 0; |
| pthread_t tid[NUM_THREADS]; |
| |
| PRINT(g_verbose, ("starting threads\n")); |
| for (i = 0; i < NUM_THREADS; ++i) { |
| ASSERT_EQ(0, pthread_create_check_eagain(&tid[i], NULL, |
| MallocSmallThread, NULL)); |
| } |
| |
| PRINT(g_verbose, ("joining threads\n")); |
| for (i = 0; i < NUM_THREADS; ++i) { |
| void* mem; |
| ASSERT_EQ(0, pthread_join(tid[i], &mem)); |
| free(mem); |
| } |
| } |
| |
| |
/*
 * Test large allocations and deallocations in order to cover
 * grow_heap() and shrink_heap() in glibc's malloc/arena.c.
 */
void* MallocLargeThread(void *unused) {
  void *blocks[100];
  size_t i;
  for (i = 0; i < ARRAY_SIZE(blocks); i++) {
    blocks[i] = malloc(0x1000);
    EXPECT_NE(blocks[i], nullptr);
  }
  for (i = 0; i < ARRAY_SIZE(blocks); i++) {
    free(blocks[i]);
  }
  return NULL;
}
| |
| TEST_F(ThreadTests, TestMallocLarge) { |
| int i = 0; |
| pthread_t tid[NUM_THREADS]; |
| |
| for (i = 0; i < NUM_THREADS; i++) { |
| ASSERT_EQ(0, pthread_create_check_eagain(&tid[i], NULL, |
| MallocLargeThread, NULL)); |
| } |
| for (i = 0; i < NUM_THREADS; i++) { |
| ASSERT_EQ(0, pthread_join(tid[i], NULL)); |
| } |
| } |
| |
| |
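/*
 * Grows and shrinks one block across several size classes to stress
 * realloc() under concurrency; returns the final block for the joiner
 * to free.
 */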
| void* ReallocThread(void *userdata) { |
| void* ptr; |
| int i; |
| ptr = (void*) malloc(16); |
| for (i = 0; i < g_num_test_loops; ++i) { |
| ptr = (void*)realloc(ptr, 32); |
| EXPECT_NE(nullptr, ptr); |
| ptr = (void*)realloc(ptr, 64000); |
| EXPECT_NE(nullptr, ptr); |
| ptr = (void*)realloc(ptr, 64); |
| EXPECT_NE(nullptr, ptr); |
| ptr = (void*)realloc(ptr, 32000); |
| EXPECT_NE(nullptr, ptr); |
| ptr = (void*)realloc(ptr, 256); |
| EXPECT_NE(nullptr, ptr); |
| } |
| |
| return ptr; |
| } |
| |
| |
TEST_F(ThreadTests, TestRealloc) {
  pthread_t tid[NUM_THREADS];
  int i = 0;
  for (i = 0; i < NUM_THREADS; ++i) {
    ASSERT_EQ(0, pthread_create_check_eagain(&tid[i], NULL,
                                             ReallocThread, NULL));
  }

  for (i = 0; i < NUM_THREADS; ++i) {
    void* mem;
    ASSERT_EQ(0, pthread_join(tid[i], &mem));
    free(mem);  /* ReallocThread returns its final block. */
  }
}
| |
| /* Worker threads should spin-wait for this condition before starting work. */ |
| static volatile int workers_begin; |
| |
| /* Which intrinsic are we testing? */ |
| static enum { COMPARE_AND_SWAP, FETCH_AND_ADD } intrinsic; |
| |
#define ATOMIC_ITERATIONS 1000000

/*
 * Give up if a single compare-and-swap loop fails this many times in a
 * row: 1000 * NUM_THREADS is 0.1% of the total number of increments
 * performed across all threads (ATOMIC_ITERATIONS * NUM_THREADS).
 * NOTE: This adds potential flakiness on very exotic architectures, but
 * we are not supporting those today.
 */
#define MAX_UNFAIRNESS (1000 * NUM_THREADS)


/*
 * Performs ATOMIC_ITERATIONS atomic increments of the counter pointed to
 * by |data|, using the increment strategy selected by the |intrinsic|
 * global. The caller checks the final count.
 */
static void* WorkerThread(void *data) {
| volatile AtomicInt32* counter = (volatile AtomicInt32*) data; |
| volatile int bogus = 0; |
| static int backoff[8] = { 8, 16, 32, 64, 128, 256, 1024, 2048 }; |
| int success = 0; |
| |
| int ii, jj, kk; |
| |
| /* NB, gets stuck on ARM QEMU. */ |
| while (!workers_begin) |
| ; |
| ANNOTATE_HAPPENS_AFTER(&workers_begin); |
| |
| for (ii = 0; ii < ATOMIC_ITERATIONS; ++ii) { |
| switch (intrinsic) { |
| case COMPARE_AND_SWAP: |
| /* NB, not atomic on ARM QEMU. */ |
| for (jj = 0; ; jj++) { |
| AtomicInt32 prev = *counter; |
| if (__sync_val_compare_and_swap(counter, prev, prev + 1) == prev) { |
| |
| /* Add win backoff to allow other threads to win */ |
| for (kk = 0; kk < backoff[success & 0x7]; kk++) bogus++; |
| |
| success++; |
| break; |
| } |
| |
| /* Failed, so reset number of successive swaps */ |
| success = 0; |
| |
| /* Add a break out condition in case "volatile" is broken or |
| the atomic operation is exceedingly unfair. */ |
| if (jj > MAX_UNFAIRNESS) { |
| printf("Stuck or exceeded unfairness.\n"); |
| break; |
| } |
| } |
| break; |
| case FETCH_AND_ADD: |
| __sync_fetch_and_add(counter, 1); |
| break; |
| default: |
| abort(); |
| } |
| } |
| return NULL; |
| } |
| |
/* Runs NUM_THREADS copies of WorkerThread in parallel. The address of a
 * shared volatile AtomicInt32 counter is passed to each thread.
 */
| static void CheckAtomicityUnderConcurrency(void) { |
| volatile AtomicInt32 counter = 0; |
| pthread_t threads[NUM_THREADS]; |
| unsigned long ii; |
| |
| workers_begin = 0; /* Hold on... */ |
| for (ii = 0; ii < ARRAY_SIZE(threads); ++ii) |
| ASSERT_EQ(0, pthread_create_check_eagain(&threads[ii], NULL, &WorkerThread, |
| (void*) &counter)); |
| |
| ANNOTATE_HAPPENS_BEFORE(&workers_begin); |
| ANNOTATE_IGNORE_WRITES_BEGIN(); |
| workers_begin = 1; /* Thunderbirds are go! */ |
| ANNOTATE_IGNORE_WRITES_END(); |
| |
| for (ii = 0; ii < ARRAY_SIZE(threads); ++ii) |
| ASSERT_EQ(0, pthread_join(threads[ii], NULL)); |
| EXPECT_EQ(ATOMIC_ITERATIONS * ARRAY_SIZE(threads), (unsigned long) counter); |
| } |
| |
| /* Test hand-written intrinsics for ARM. */ |
TEST_F(ThreadTests, TestIntrinsics) {
| /* Test uncontended behaviour: */ |
| { |
| /* COMPARE_AND_SWAP */ |
| volatile AtomicInt32 x = 123; |
| EXPECT_EQ(123, __sync_val_compare_and_swap(&x, 123, 42)); /* matches */ |
| EXPECT_EQ(42, x); /* => swapped */ |
| EXPECT_EQ(42, __sync_val_compare_and_swap(&x, 43, 9876)); /* no match */ |
| EXPECT_EQ(42, x); /* => unchanged */ |
| |
| /* FETCH_AND_ADD */ |
| x = 123; |
| EXPECT_EQ(123, __sync_fetch_and_add(&x, 42)); |
| EXPECT_EQ(165, x); |
| EXPECT_EQ(165, __sync_fetch_and_add(&x, 1)); |
| EXPECT_EQ(166, x); |
| } |
| |
| /* Test behaviour with concurrency: */ |
| intrinsic = COMPARE_AND_SWAP; |
| CheckAtomicityUnderConcurrency(); |
| |
| intrinsic = FETCH_AND_ADD; |
| CheckAtomicityUnderConcurrency(); |
| } |
| |
| TEST_F(ThreadTests, TestCondvar) { |
| int i = 0; |
| pthread_cond_t cv; |
| pthread_mutex_t mu; |
| struct timeval tv; |
| struct timespec ts; |
| int res = 0; |
| ASSERT_EQ(0, pthread_mutex_init(&mu, NULL)); |
| ASSERT_EQ(0, pthread_cond_init(&cv, NULL)); |
| |
  /*
   * We just need the wait to time out immediately, so truncate the
   * current time to the whole second; the result is never in the future.
   */
  res = gettimeofday(&tv, NULL);
  EXPECT_EQ(res, 0);
  ts.tv_sec = tv.tv_sec;
  ts.tv_nsec = 0;
| |
| ASSERT_EQ(0, pthread_mutex_lock(&mu)); |
| /* We try several times since the wait may return for a different reason. */ |
| while (i < 10) { |
| res = pthread_cond_timedwait(&cv, &mu, &ts); |
| if (res == ETIMEDOUT) |
| break; |
| i++; |
| } |
| EXPECT_EQ(ETIMEDOUT, res); |
| |
| ASSERT_EQ(0, pthread_mutex_unlock(&mu)); |
| |
| ASSERT_EQ(0, pthread_cond_destroy(&cv)); |
| ASSERT_EQ(0, pthread_mutex_destroy(&mu)); |
| } |
| |
| |
| TEST_F(ThreadTests, TestCondvarAttrs) { |
| clockid_t clock_id = -1; |
| int shared = -1; |
| pthread_cond_t cv; |
| pthread_condattr_t attr; |
| |
| /* Verify default attribute settings */ |
| ASSERT_EQ(0, pthread_condattr_init(&attr)); |
  ASSERT_EQ(0, pthread_condattr_getclock(&attr, &clock_id));
  EXPECT_EQ(CLOCK_REALTIME, clock_id);
  ASSERT_EQ(0, pthread_condattr_getpshared(&attr, &shared));
  EXPECT_EQ(PTHREAD_PROCESS_PRIVATE, shared);
| |
| ASSERT_EQ(0, pthread_cond_init(&cv, &attr)); |
| ASSERT_EQ(0, pthread_cond_destroy(&cv)); |
| |
| /* Verify we can set attributes to their default value. */ |
  ASSERT_EQ(0, pthread_condattr_setclock(&attr, CLOCK_REALTIME));
  ASSERT_EQ(0, pthread_condattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE));
| ASSERT_EQ(0, pthread_cond_init(&cv, &attr)); |
| ASSERT_EQ(0, pthread_cond_destroy(&cv)); |
| ASSERT_EQ(0, pthread_condattr_destroy(&attr)); |
| |
| /* |
| * Verify that setting attributes to unsupported values fails. |
| */ |
| ASSERT_EQ(0, pthread_condattr_init(&attr)); |
| EXPECT_EQ(ENOTSUP, pthread_condattr_setclock(&attr, CLOCK_MONOTONIC)); |
| PTHREAD_PROCESS_SHARED)); |
| ASSERT_EQ(0, pthread_condattr_destroy(&attr)); |
| } |
| |
void AddNanosecondsToTimespec(struct timespec *time, unsigned int nanoseconds) {
  EXPECT_LE(nanoseconds, 1000000000u);
  time->tv_nsec += nanoseconds;
  /* Keep tv_nsec in its valid range of [0, 999999999]. */
  if (time->tv_nsec >= 1000000000) {
    time->tv_nsec -= 1000000000;
    time->tv_sec += 1;
  }
}
| |
| TEST_F(ThreadTests, TestCondvarTimeout) { |
| int i = 0; |
| pthread_cond_t cv; |
| pthread_mutex_t mu; |
| struct timespec t_start; |
| struct timespec t_timeout; |
| struct timespec t_end; |
| uint64_t elapsed_ns; |
| int res = 0; |
| ASSERT_EQ(0, pthread_mutex_init(&mu, NULL)); |
| ASSERT_EQ(0, pthread_cond_init(&cv, NULL)); |
| |
| /* |
| * The timeout value for pthread_cond_timedwait is in absolute |
| * CLOCK_REALTIME time, so we use the current time and add the |
| * desired elapsed time to it. |
| */ |
| res = clock_gettime(CLOCK_REALTIME, &t_start); |
| EXPECT_EQ(res, 0); |
| t_timeout = t_start; |
| AddNanosecondsToTimespec(&t_timeout, TIMEOUT_TIME_NS); |
| |
| ASSERT_EQ(0, pthread_mutex_lock(&mu)); |
| |
| /* We try several times since the wait may return for a different reason. */ |
| while (i < 10) { |
| res = pthread_cond_timedwait(&cv, &mu, &t_timeout); |
| if (res == ETIMEDOUT) |
| break; |
| printf("res = %d\n", res); |
| i++; |
| } |
| printf("res = %d, ETIMEDOUT = %d\n", res, ETIMEDOUT); |
| EXPECT_EQ(ETIMEDOUT, res); |
| |
| ASSERT_EQ(0, pthread_mutex_unlock(&mu)); |
| |
| res = clock_gettime(CLOCK_REALTIME, &t_end); |
| EXPECT_EQ(res, 0); |
| |
  elapsed_ns = 1000ull * 1000 * 1000 * (t_end.tv_sec - t_start.tv_sec) +
      (t_end.tv_nsec - t_start.tv_nsec);
  printf("Elapsed time %llu ns\n", (unsigned long long) elapsed_ns);
| |
| EXPECT_GE(elapsed_ns, TIMEOUT_CHECK_NS); |
| |
| ASSERT_EQ(0, pthread_cond_destroy(&cv)); |
| ASSERT_EQ(0, pthread_mutex_destroy(&mu)); |
| } |
| |
| TEST_F(ThreadTests, TestScope) { |
| pthread_attr_t attr; |
| int scope; |
| |
| /* Check that the default scope is PTHREAD_SCOPE_SYSTEM */ |
| ASSERT_EQ(0, pthread_attr_init(&attr)); |
| ASSERT_EQ(0, pthread_attr_getscope(&attr, &scope)); |
| EXPECT_EQ(PTHREAD_SCOPE_SYSTEM, scope); |
| |
  /* PTHREAD_SCOPE_PROCESS is unsupported; out-of-range values are invalid */
| EXPECT_EQ(ENOTSUP, pthread_attr_setscope(&attr, PTHREAD_SCOPE_PROCESS)); |
| EXPECT_EQ(EINVAL, pthread_attr_setscope(&attr, 0xff)); |
| |
| /* Setting to PTHREAD_SCOPE_SYSTEM should work (no-op) */ |
| ASSERT_EQ(0, pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM)); |
| } |
| |
| TEST_F(ThreadTests, TestStackSize) { |
| pthread_attr_t attr; |
| size_t stack_size, stack_size2; |
| |
| ASSERT_EQ(0, pthread_attr_init(&attr)); |
| ASSERT_EQ(0, pthread_attr_getstacksize(&attr, &stack_size)); |
| stack_size *= 2; |
| |
| ASSERT_EQ(0, pthread_attr_setstacksize(&attr, stack_size)); |
| ASSERT_EQ(0, pthread_attr_getstacksize(&attr, &stack_size2)); |
| |
| EXPECT_EQ(stack_size, stack_size2); |
| } |
| |
| struct MutexClaimerThreadArgs { |
| pthread_mutex_t *mutex; |
| int should_exit; |
| }; |
| |
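/*
 * Repeatedly claims and releases |args->mutex| until should_exit (read
 * under the mutex) is set, exercising the mutex's owner bookkeeping while
 * the main thread waits on the condvar.
 */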
| void *MutexClaimerThread(void *thread_arg) { |
  struct MutexClaimerThreadArgs *args =
      (struct MutexClaimerThreadArgs *) thread_arg;
| for (;;) { |
| pthread_mutex_lock(args->mutex); |
| int should_exit = args->should_exit; |
| pthread_mutex_unlock(args->mutex); |
| if (should_exit) |
| break; |
| } |
| return NULL; |
| } |
| |
| /* |
| * This is a regression test for |
| * http://code.google.com/p/nativeclient/issues/detail?id=3047 |
| * |
| * The problem was that mutexes created with PTHREAD_MUTEX_ERRORCHECK |
| * didn't unlock successfully after pthread_cond_timedwait() had |
| * returned with ETIMEDOUT. This problem occurred with NaCl's |
| * newlib-based libpthread. |
| */ |
TEST_F(ThreadTests, TestErrorcheckMutexWorksWithCondvarTimeout) {
| pthread_mutexattr_t attrs; |
| pthread_mutex_t mutex; |
| pthread_cond_t condvar; |
| ASSERT_EQ(0, pthread_mutexattr_init(&attrs)); |
| ASSERT_EQ(0, pthread_mutexattr_settype(&attrs, PTHREAD_MUTEX_ERRORCHECK)); |
| ASSERT_EQ(0, pthread_mutex_init(&mutex, &attrs)); |
| ASSERT_EQ(0, pthread_mutexattr_destroy(&attrs)); |
| ASSERT_EQ(0, pthread_cond_init(&condvar, NULL)); |
| ASSERT_EQ(0, pthread_mutex_lock(&mutex)); |
| |
| struct MutexClaimerThreadArgs thread_args; |
| thread_args.mutex = &mutex; |
| thread_args.should_exit = 0; |
| pthread_t tid; |
| ASSERT_EQ(0, pthread_create(&tid, NULL, MutexClaimerThread, &thread_args)); |
| |
| struct timespec timeout; |
| EXPECT_EQ(clock_gettime(CLOCK_REALTIME, &timeout), 0); |
| /* |
| * Wait for 500 ms to give MutexClaimerThread() time to run and |
| * briefly claim and release the lock, which unsets the mutex's |
| * owner_thread_id. |
| */ |
  AddNanosecondsToTimespec(&timeout, 500 * 1000 * 1000);
| /* |
| * The bug is that pthread_cond_timedwait() fails to update the |
| * mutex's owner_thread_id to the current thread in the ETIMEDOUT |
| * case. |
| */ |
| EXPECT_EQ(pthread_cond_timedwait(&condvar, &mutex, &timeout), ETIMEDOUT); |
| thread_args.should_exit = 1; |
| /* The bug manifests itself by pthread_mutex_unlock() returning EPERM. */ |
| ASSERT_EQ(0, pthread_mutex_unlock(&mutex)); |
| |
| /* Clean up. */ |
| ASSERT_EQ(0, pthread_join(tid, NULL)); |
| ASSERT_EQ(0, pthread_mutex_destroy(&mutex)); |
| ASSERT_EQ(0, pthread_cond_destroy(&condvar)); |
| } |