Parallelize 2D tasks with thread id support
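
The new pthreadpool_parallelize_2d_with_thread entry point passes the index
of the worker thread that executes each item to the task. Since no two
workers share a thread id, this lets a task keep per-thread scratch state
without atomic operations. A minimal usage sketch (the context struct, task
function, and matrix layout below are illustrative, not part of this change):

    /* One partial sum per thread; partial_sums must hold
     * pthreadpool_get_threads_count(threadpool) entries. */
    struct sum_context {
        const double* matrix;  /* range_i x range_j, row-major */
        size_t range_j;
        double* partial_sums;  /* indexed by thread id */
    };

    static void sum_item(void* ctx, size_t thread_id, size_t i, size_t j) {
        struct sum_context* c = (struct sum_context*) ctx;
        c->partial_sums[thread_id] += c->matrix[i * c->range_j + j];
    }

    pthreadpool_parallelize_2d_with_thread(
        threadpool, sum_item, &context, range_i, range_j, 0 /* flags */);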
diff --git a/include/pthreadpool.h b/include/pthreadpool.h
index 993204a..953ccc4 100644
--- a/include/pthreadpool.h
+++ b/include/pthreadpool.h
@@ -10,6 +10,7 @@
 typedef void (*pthreadpool_task_1d_with_thread_t)(void*, size_t, size_t);
 typedef void (*pthreadpool_task_1d_tile_1d_t)(void*, size_t, size_t);
 typedef void (*pthreadpool_task_2d_t)(void*, size_t, size_t);
+typedef void (*pthreadpool_task_2d_with_thread_t)(void*, size_t, size_t, size_t);
 typedef void (*pthreadpool_task_2d_tile_1d_t)(void*, size_t, size_t, size_t);
 typedef void (*pthreadpool_task_2d_tile_2d_t)(void*, size_t, size_t, size_t, size_t);
 typedef void (*pthreadpool_task_3d_t)(void*, size_t, size_t, size_t);
@@ -265,6 +266,40 @@
 	uint32_t flags);
 
 /**
+ * Process items on a 2D grid passing along the current thread id.
+ *
+ * The function implements a parallel version of the following snippet:
+ *
+ *   for (size_t i = 0; i < range_i; i++)
+ *     for (size_t j = 0; j < range_j; j++)
+ *       function(context, thread_index, i, j);
+ *
+ * When the function returns, all items have been processed and the thread pool
+ * is ready for a new task.
+ *
+ * @note If multiple threads call this function with the same thread pool, the
+ *    calls are serialized.
+ *
+ * @param threadpool  the thread pool to use for parallelisation. If threadpool
+ *    is NULL, all items are processed serially on the calling thread.
+ * @param function    the function to call for each item.
+ * @param context     the first argument passed to the specified function.
+ * @param range_i     the number of items to process along the first dimension
+ *    of the 2D grid.
+ * @param range_j     the number of items to process along the second dimension
+ *    of the 2D grid.
+ * @param flags       a bitwise combination of zero or more optional flags
+ *    (PTHREADPOOL_FLAG_DISABLE_DENORMALS or PTHREADPOOL_FLAG_YIELD_WORKERS)
+ */
+void pthreadpool_parallelize_2d_with_thread(
+	pthreadpool_t threadpool,
+	pthreadpool_task_2d_with_thread_t function,
+	void* context,
+	size_t range_i,
+	size_t range_j,
+	uint32_t flags);
+
+/**
  * Process items on a 2D grid with the specified maximum tile size along the
  * last grid dimension.
  *
diff --git a/src/fastpath.c b/src/fastpath.c
index e698af8..9ad58bf 100644
--- a/src/fastpath.c
+++ b/src/fastpath.c
@@ -227,6 +227,55 @@
 	pthreadpool_fence_release();
 }
 
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_with_thread_fastpath(
+	struct pthreadpool* threadpool,
+	struct thread_info* thread)
+{
+	assert(threadpool != NULL);
+	assert(thread != NULL);
+
+	const pthreadpool_task_2d_with_thread_t task = (pthreadpool_task_2d_with_thread_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+	void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+	const size_t threads_count = threadpool->threads_count.value;
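+	/* Each per-thread counter may be decremented past zero by up to
+	 * threads_count threads; underflow wraps to at least -threads_count. */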
+	const size_t range_threshold = -threads_count;
+
+	/* Process thread's own range of items */
+	const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+	const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_2d.range_j;
+	const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(range_start, range_j);
+	size_t i = index_i_j.quotient;
+	size_t j = index_i_j.remainder;
+
+	const size_t thread_number = thread->thread_number;
+	while (pthreadpool_decrement_fetch_relaxed_size_t(&thread->range_length) < range_threshold) {
+		task(argument, thread_number, i, j);
+		if (++j == range_j.value) {
+			j = 0;
+			i += 1;
+		}
+	}
+
+	/* There still may be other threads with work */
+	for (size_t tid = modulo_decrement(thread_number, threads_count);
+		tid != thread_number;
+		tid = modulo_decrement(tid, threads_count))
+	{
+		struct thread_info* other_thread = &threadpool->threads[tid];
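+		/* Steal items from the tail of the other thread's range */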
+		while (pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_length) < range_threshold) {
+			const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+			const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(linear_index, range_j);
+			task(argument, thread_number, index_i_j.quotient, index_i_j.remainder);
+		}
+	}
+
+	/* Make changes by this thread visible to other threads */
+	pthreadpool_fence_release();
+}
+
 PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_1d_fastpath(
 	struct pthreadpool* threadpool,
 	struct thread_info* thread)
diff --git a/src/portable-api.c b/src/portable-api.c
index 3b905da..9a077bb 100644
--- a/src/portable-api.c
+++ b/src/portable-api.c
@@ -209,6 +209,47 @@
 	pthreadpool_fence_release();
 }
 
+static void thread_parallelize_2d_with_thread(struct pthreadpool* threadpool, struct thread_info* thread) {
+	assert(threadpool != NULL);
+	assert(thread != NULL);
+
+	const pthreadpool_task_2d_with_thread_t task = (pthreadpool_task_2d_with_thread_t) pthreadpool_load_relaxed_void_p(&threadpool->task);
+	void *const argument = pthreadpool_load_relaxed_void_p(&threadpool->argument);
+
+	/* Process thread's own range of items */
+	const size_t range_start = pthreadpool_load_relaxed_size_t(&thread->range_start);
+	const struct fxdiv_divisor_size_t range_j = threadpool->params.parallelize_2d.range_j;
+	const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(range_start, range_j);
+	size_t i = index_i_j.quotient;
+	size_t j = index_i_j.remainder;
+
+	const size_t thread_number = thread->thread_number;
+	while (pthreadpool_try_decrement_relaxed_size_t(&thread->range_length)) {
+		task(argument, thread_number, i, j);
+		if (++j == range_j.value) {
+			j = 0;
+			i += 1;
+		}
+	}
+
+	/* There still may be other threads with work */
+	const size_t threads_count = threadpool->threads_count.value;
+	for (size_t tid = modulo_decrement(thread_number, threads_count);
+		tid != thread_number;
+		tid = modulo_decrement(tid, threads_count))
+	{
+		struct thread_info* other_thread = &threadpool->threads[tid];
+		while (pthreadpool_try_decrement_relaxed_size_t(&other_thread->range_length)) {
+			const size_t linear_index = pthreadpool_decrement_fetch_relaxed_size_t(&other_thread->range_end);
+			const struct fxdiv_result_size_t index_i_j = fxdiv_divide_size_t(linear_index, range_j);
+			task(argument, thread_number, index_i_j.quotient, index_i_j.remainder);
+		}
+	}
+
+	/* Make changes by this thread visible to other threads */
+	pthreadpool_fence_release();
+}
+
 static void thread_parallelize_2d_tile_1d(struct pthreadpool* threadpool, struct thread_info* thread) {
 	assert(threadpool != NULL);
 	assert(thread != NULL);
@@ -1746,6 +1787,49 @@
 	}
 }
 
+void pthreadpool_parallelize_2d_with_thread(
+	pthreadpool_t threadpool,
+	pthreadpool_task_2d_with_thread_t task,
+	void* argument,
+	size_t range_i,
+	size_t range_j,
+	uint32_t flags)
+{
+	size_t threads_count;
+	if (threadpool == NULL || (threads_count = threadpool->threads_count.value) <= 1 || (range_i | range_j) <= 1) {
+		/* No thread pool used: execute task sequentially on the calling thread */
+		struct fpu_state saved_fpu_state = { 0 };
+		if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+			saved_fpu_state = get_fpu_state();
+			disable_fpu_denormals();
+		}
+		for (size_t i = 0; i < range_i; i++) {
+			for (size_t j = 0; j < range_j; j++) {
+				task(argument, 0, i, j);
+			}
+		}
+		if (flags & PTHREADPOOL_FLAG_DISABLE_DENORMALS) {
+			set_fpu_state(saved_fpu_state);
+		}
+	} else {
+		const size_t range = range_i * range_j;
+		const struct pthreadpool_2d_params params = {
+			.range_j = fxdiv_init_size_t(range_j),
+		};
+		thread_function_t parallelize_2d_with_thread = &thread_parallelize_2d_with_thread;
+		#if PTHREADPOOL_USE_FASTPATH
+			const size_t range_threshold = -threads_count;
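+			/* Fastpath counter-underflow detection requires range < -threads_count */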
+			if (range < range_threshold) {
+				parallelize_2d_with_thread = &pthreadpool_thread_parallelize_2d_with_thread_fastpath;
+			}
+		#endif
+		pthreadpool_parallelize(
+			threadpool, parallelize_2d_with_thread, &params, sizeof(params),
+			task, argument, range, flags);
+	}
+}
+
 void pthreadpool_parallelize_2d_tile_1d(
 	pthreadpool_t threadpool,
 	pthreadpool_task_2d_tile_1d_t task,
diff --git a/src/shim.c b/src/shim.c
index f0b2d0c..36f7c00 100644
--- a/src/shim.c
+++ b/src/shim.c
@@ -92,6 +92,22 @@
 	}
 }
 
+void pthreadpool_parallelize_2d_with_thread(
+	struct pthreadpool* threadpool,
+	pthreadpool_task_2d_with_thread_t task,
+	void* argument,
+	size_t range_i,
+	size_t range_j,
+	uint32_t flags)
+{
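+	/* Shim builds have no worker threads: every item runs on the calling thread, id 0 */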
+	for (size_t i = 0; i < range_i; i++) {
+		for (size_t j = 0; j < range_j; j++) {
+			task(argument, 0, i, j);
+		}
+	}
+}
+
 void pthreadpool_parallelize_2d_tile_1d(
 	pthreadpool_t threadpool,
 	pthreadpool_task_2d_tile_1d_t task,
diff --git a/src/threadpool-object.h b/src/threadpool-object.h
index 2ef12f5..52db369 100644
--- a/src/threadpool-object.h
+++ b/src/threadpool-object.h
@@ -802,6 +802,10 @@
 	struct pthreadpool* threadpool,
 	struct thread_info* thread);
 
+PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_with_thread_fastpath(
+	struct pthreadpool* threadpool,
+	struct thread_info* thread);
+
 PTHREADPOOL_INTERNAL void pthreadpool_thread_parallelize_2d_tile_1d_fastpath(
 	struct pthreadpool* threadpool,
 	struct thread_info* thread);
diff --git a/test/pthreadpool.cc b/test/pthreadpool.cc
index 5806eaa..2fc67e8 100644
--- a/test/pthreadpool.cc
+++ b/test/pthreadpool.cc
@@ -1534,6 +1534,306 @@
 	EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DRangeI * kParallelize2DRangeJ);
 }
 
+static void ComputeNothing2DWithThread(void*, size_t, size_t, size_t) {
+}
+
+TEST(Parallelize2DWithThread, SingleThreadPoolCompletes) {
+	auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_2d_with_thread(threadpool.get(),
+		ComputeNothing2DWithThread,
+		nullptr,
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+}
+
+TEST(Parallelize2DWithThread, MultiThreadPoolCompletes) {
+	auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+		GTEST_SKIP();
+	}
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		ComputeNothing2DWithThread,
+		nullptr,
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+}
+
+static void CheckBounds2DWithThread(void*, size_t, size_t i, size_t j) {
+	EXPECT_LT(i, kParallelize2DRangeI);
+	EXPECT_LT(j, kParallelize2DRangeJ);
+}
+
+TEST(Parallelize2DWithThread, SingleThreadPoolAllItemsInBounds) {
+	auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		CheckBounds2DWithThread,
+		nullptr,
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+}
+
+TEST(Parallelize2DWithThread, MultiThreadPoolAllItemsInBounds) {
+	auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+		GTEST_SKIP();
+	}
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		CheckBounds2DWithThread,
+		nullptr,
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+}
+
+static void SetTrue2DWithThread(std::atomic_bool* processed_indicators, size_t, size_t i, size_t j) {
+	const size_t linear_idx = i * kParallelize2DRangeJ + j;
+	processed_indicators[linear_idx].store(true, std::memory_order_relaxed);
+}
+
+TEST(Parallelize2DWithThread, SingleThreadPoolAllItemsProcessed) {
+	std::vector<std::atomic_bool> indicators(kParallelize2DRangeI * kParallelize2DRangeJ);
+
+	auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		reinterpret_cast<pthreadpool_task_2d_with_thread_t>(SetTrue2DWithThread),
+		static_cast<void*>(indicators.data()),
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+
+	for (size_t i = 0; i < kParallelize2DRangeI; i++) {
+		for (size_t j = 0; j < kParallelize2DRangeJ; j++) {
+			const size_t linear_idx = i * kParallelize2DRangeJ + j;
+			EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+				<< "Element (" << i << ", " << j << ") not processed";
+		}
+	}
+}
+
+TEST(Parallelize2DWithThread, MultiThreadPoolAllItemsProcessed) {
+	std::vector<std::atomic_bool> indicators(kParallelize2DRangeI * kParallelize2DRangeJ);
+
+	auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+		GTEST_SKIP();
+	}
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		reinterpret_cast<pthreadpool_task_2d_with_thread_t>(SetTrue2DWithThread),
+		static_cast<void*>(indicators.data()),
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+
+	for (size_t i = 0; i < kParallelize2DRangeI; i++) {
+		for (size_t j = 0; j < kParallelize2DRangeJ; j++) {
+			const size_t linear_idx = i * kParallelize2DRangeJ + j;
+			EXPECT_TRUE(indicators[linear_idx].load(std::memory_order_relaxed))
+				<< "Element (" << i << ", " << j << ") not processed";
+		}
+	}
+}
+
+static void Increment2DWithThread(std::atomic_int* processed_counters, size_t, size_t i, size_t j) {
+	const size_t linear_idx = i * kParallelize2DRangeJ + j;
+	processed_counters[linear_idx].fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize2DWithThread, SingleThreadPoolEachItemProcessedOnce) {
+	std::vector<std::atomic_int> counters(kParallelize2DRangeI * kParallelize2DRangeJ);
+
+	auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		reinterpret_cast<pthreadpool_task_2d_with_thread_t>(Increment2DWithThread),
+		static_cast<void*>(counters.data()),
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+
+	for (size_t i = 0; i < kParallelize2DRangeI; i++) {
+		for (size_t j = 0; j < kParallelize2DRangeJ; j++) {
+			const size_t linear_idx = i * kParallelize2DRangeJ + j;
+			EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+				<< "Element (" << i << ", " << j << ") was processed "
+				<< counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+		}
+	}
+}
+
+TEST(Parallelize2DWithThread, MultiThreadPoolEachItemProcessedOnce) {
+	std::vector<std::atomic_int> counters(kParallelize2DRangeI * kParallelize2DRangeJ);
+
+	auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+		GTEST_SKIP();
+	}
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		reinterpret_cast<pthreadpool_task_2d_with_thread_t>(Increment2DWithThread),
+		static_cast<void*>(counters.data()),
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+
+	for (size_t i = 0; i < kParallelize2DRangeI; i++) {
+		for (size_t j = 0; j < kParallelize2DRangeJ; j++) {
+			const size_t linear_idx = i * kParallelize2DRangeJ + j;
+			EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), 1)
+				<< "Element (" << i << ", " << j << ") was processed "
+				<< counters[linear_idx].load(std::memory_order_relaxed) << " times (expected: 1)";
+		}
+	}
+}
+
+TEST(Parallelize2DWithThread, SingleThreadPoolEachItemProcessedMultipleTimes) {
+	std::vector<std::atomic_int> counters(kParallelize2DRangeI * kParallelize2DRangeJ);
+
+	auto_pthreadpool_t threadpool(pthreadpool_create(1), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+		pthreadpool_parallelize_2d_with_thread(
+			threadpool.get(),
+			reinterpret_cast<pthreadpool_task_2d_with_thread_t>(Increment2DWithThread),
+			static_cast<void*>(counters.data()),
+			kParallelize2DRangeI, kParallelize2DRangeJ,
+			0 /* flags */);
+	}
+
+	for (size_t i = 0; i < kParallelize2DRangeI; i++) {
+		for (size_t j = 0; j < kParallelize2DRangeJ; j++) {
+			const size_t linear_idx = i * kParallelize2DRangeJ + j;
+			EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+				<< "Element (" << i << ", " << j << ") was processed "
+				<< counters[linear_idx].load(std::memory_order_relaxed) << " times "
+				<< "(expected: " << kIncrementIterations << ")";
+		}
+	}
+}
+
+TEST(Parallelize2DWithThread, MultiThreadPoolEachItemProcessedMultipleTimes) {
+	std::vector<std::atomic_int> counters(kParallelize2DRangeI * kParallelize2DRangeJ);
+
+	auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+		GTEST_SKIP();
+	}
+
+	for (size_t iteration = 0; iteration < kIncrementIterations; iteration++) {
+		pthreadpool_parallelize_2d_with_thread(
+			threadpool.get(),
+			reinterpret_cast<pthreadpool_task_2d_with_thread_t>(Increment2DWithThread),
+			static_cast<void*>(counters.data()),
+			kParallelize2DRangeI, kParallelize2DRangeJ,
+			0 /* flags */);
+	}
+
+	for (size_t i = 0; i < kParallelize2DRangeI; i++) {
+		for (size_t j = 0; j < kParallelize2DRangeJ; j++) {
+			const size_t linear_idx = i * kParallelize2DRangeJ + j;
+			EXPECT_EQ(counters[linear_idx].load(std::memory_order_relaxed), kIncrementIterations)
+				<< "Element (" << i << ", " << j << ") was processed "
+				<< counters[linear_idx].load(std::memory_order_relaxed) << " times "
+				<< "(expected: " << kIncrementIterations << ")";
+		}
+	}
+}
+
+static void IncrementSame2DWithThread(std::atomic_int* num_processed_items, size_t, size_t i, size_t j) {
+	num_processed_items->fetch_add(1, std::memory_order_relaxed);
+}
+
+TEST(Parallelize2DWithThread, MultiThreadPoolHighContention) {
+	std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+	auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+		GTEST_SKIP();
+	}
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		reinterpret_cast<pthreadpool_task_2d_with_thread_t>(IncrementSame2DWithThread),
+		static_cast<void*>(&num_processed_items),
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+	EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DRangeI * kParallelize2DRangeJ);
+}
+
+static void WorkImbalance2DWithThread(std::atomic_int* num_processed_items, size_t, size_t i, size_t j) {
+	num_processed_items->fetch_add(1, std::memory_order_relaxed);
+	if (i == 0 && j == 0) {
+		/* Spin-wait until all items are computed */
+		while (num_processed_items->load(std::memory_order_relaxed) != kParallelize2DRangeI * kParallelize2DRangeJ) {
+			std::atomic_thread_fence(std::memory_order_acquire);
+		}
+	}
+}
+
+TEST(Parallelize2DWithThread, MultiThreadPoolWorkStealing) {
+	std::atomic_int num_processed_items = ATOMIC_VAR_INIT(0);
+
+	auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	if (pthreadpool_get_threads_count(threadpool.get()) <= 1) {
+		GTEST_SKIP();
+	}
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		reinterpret_cast<pthreadpool_task_2d_with_thread_t>(WorkImbalance2DWithThread),
+		static_cast<void*>(&num_processed_items),
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+	EXPECT_EQ(num_processed_items.load(std::memory_order_relaxed), kParallelize2DRangeI * kParallelize2DRangeJ);
+}
+
+static void CheckThreadIndexValid2DWithThread(const size_t* num_threads, size_t thread_index, size_t, size_t) {
+	EXPECT_LT(thread_index, *num_threads);
+}
+
+TEST(Parallelize2DWithThread, MultiThreadPoolThreadIndexValid) {
+	auto_pthreadpool_t threadpool(pthreadpool_create(0), pthreadpool_destroy);
+	ASSERT_TRUE(threadpool.get());
+
+	size_t num_threads = pthreadpool_get_threads_count(threadpool.get());
+	if (num_threads <= 1) {
+		GTEST_SKIP();
+	}
+
+	pthreadpool_parallelize_2d_with_thread(
+		threadpool.get(),
+		reinterpret_cast<pthreadpool_task_2d_with_thread_t>(CheckThreadIndexValid2DWithThread),
+		static_cast<void*>(&num_threads),
+		kParallelize2DRangeI, kParallelize2DRangeJ,
+		0 /* flags */);
+}
+
 static void ComputeNothing2DTile1D(void*, size_t, size_t, size_t) {
 }