Skip to content
Open
Changes from 1 commit
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
ccd0bbc
draft: impl lazy input consumption in mp.Pool.imap(_unordered)
Jul 20, 2025
002ef46
Use semaphore to synchronize threads
Jul 20, 2025
6e0bc58
Update buffersize behavior to match concurrent.futures.Executor behavior
Jul 21, 2025
62b2b6a
Release all `buffersize_lock` obj from the parent thread when terminate
Jul 21, 2025
0b6ba41
Add 2 basic `ThreadPool.imap()` tests w/ and w/o buffersize
Jul 21, 2025
aade15e
Fix accidental swap in imports
Jul 21, 2025
fb38a72
clear Pool._taskqueue_buffersize_semaphores safely
Jul 21, 2025
6ef488b
Slightly optimize Pool._taskqueue_buffersize_semaphores terminate
Jul 21, 2025
1716725
Rename `Pool.imap()` buffersize-related tests
Jul 21, 2025
9b43cd0
Fix typo in `IMapIterator.__init__()`
Jul 22, 2025
2d89341
Add tests for buffersize combinations with other kwargs
Jul 22, 2025
9ab2705
Remove if-branch in `_terminate_pool`
Jul 27, 2025
a955003
Add more edge-case tests for `imap` and `imap_unordered`
Jul 27, 2025
80efd6e
Split inf iterable test for `imap` and `imap_unordered`
Jul 27, 2025
83d6930
Add doc for `buffersize` argument of `imap` and `imap_unordered`
Jul 27, 2025
995ad8c
add *versionadded* for `imap_unordered`
Jul 28, 2025
3b6ad65
Remove ambiguity in `buffersize` description.
Jul 28, 2025
c941c16
Set *versionadded* as next in docs
Jul 28, 2025
d09e891
Add whatsnew entry
Jul 28, 2025
9c6d89d
Fix agreed comments on code formatting/minor refactoring
Jul 28, 2025
4550a01
Remove `imap` and `imap_unordered` body code duplication
Jul 28, 2025
77bde4d
Merge branch 'main' into feature/add-buffersize-to-multiprocessing
obaltian Aug 31, 2025
aec39fc
Merge branch 'main' into feature/add-buffersize-to-multiprocessing
obaltian Sep 3, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Use semaphore to synchronize threads
Using `threading.Semaphore` makes it easier to cap the number of
concurrently running tasks. It also makes it possible to remove the busy
wait in the child thread by waiting on the semaphore instead.

I've also updated the code to use the backpressure pattern — new
tasks are scheduled as soon as the user consumes the old ones.
  • Loading branch information
Oleksandr Baltian authored and obaltian committed Aug 14, 2025
commit 002ef46d842dbbfea7098577cd60f6f35038cba7
112 changes: 49 additions & 63 deletions Lib/multiprocessing/pool.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#

import collections
import functools
import itertools
import os
import queue
Expand Down Expand Up @@ -395,32 +396,20 @@ def _guarded_task_generation(self, result_job, func, iterable):
yield (result_job, i+1, _helper_reraises_exception, (e,), {})

def _guarded_task_generation_lazy(self, result_job, func, iterable,
lazy_task_gen_helper):
'''Provides a generator of tasks for imap and imap_unordered with
backpressure_sema):
"""Provides a generator of tasks for imap and imap_unordered with
appropriate handling for iterables which throw exceptions during
iteration.'''
if not lazy_task_gen_helper.feature_enabled:
yield from self._guarded_task_generation(result_job, func, iterable)
return

iteration."""
try:
i = -1
enumerated_iter = iter(enumerate(iterable))
thread = threading.current_thread()
max_generated_tasks = self._processes + lazy_task_gen_helper.buffersize

while thread._state == RUN:
with lazy_task_gen_helper.iterator_cond:
if lazy_task_gen_helper.not_finished_tasks >= max_generated_tasks:
continue # wait for some task to be (picked up and) finished

while True:
backpressure_sema.acquire()
try:
i, x = enumerated_iter.__next__()
i, x = next(enumerated_iter)
except StopIteration:
break

yield (result_job, i, func, (x,), {})
lazy_task_gen_helper.tasks_generated += 1

except Exception as e:
yield (result_job, i+1, _helper_reraises_exception, (e,), {})
Expand All @@ -430,31 +419,32 @@ def imap(self, func, iterable, chunksize=1, buffersize=None):
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
'''
self._check_running()
if chunksize < 1:
raise ValueError("Chunksize must be 1+, not {0:n}".format(chunksize))

result = IMapIterator(self, buffersize)

if result._backpressure_sema is None:
task_generation = self._guarded_task_generation
else:
task_generation = functools.partial(
self._guarded_task_generation_lazy,
backpressure_sema=result._backpressure_sema,
)

if chunksize == 1:
result = IMapIterator(self, buffersize)
self._taskqueue.put(
(
self._guarded_task_generation_lazy(result._job,
func,
iterable,
result._lazy_task_gen_helper),
task_generation(result._job, func, iterable),
result._set_length,
)
)
return result
else:
if chunksize < 1:
raise ValueError(
"Chunksize must be 1+, not {0:n}".format(
chunksize))
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapIterator(self, buffersize)
self._taskqueue.put(
(
self._guarded_task_generation_lazy(result._job,
mapstar,
task_batches,
result._lazy_task_gen_helper),
task_generation(result._job, mapstar, task_batches),
result._set_length,
)
)
Expand All @@ -465,30 +455,34 @@ def imap_unordered(self, func, iterable, chunksize=1, buffersize=None):
Like `imap()` method but ordering of results is arbitrary.
'''
self._check_running()
if chunksize < 1:
raise ValueError(
"Chunksize must be 1+, not {0!r}".format(chunksize)
)

result = IMapUnorderedIterator(self, buffersize)

if result._backpressure_sema is None:
task_generation = self._guarded_task_generation
else:
task_generation = functools.partial(
self._guarded_task_generation_lazy,
backpressure_sema=result._backpressure_sema,
)

if chunksize == 1:
result = IMapUnorderedIterator(self, buffersize)
self._taskqueue.put(
(
self._guarded_task_generation_lazy(result._job,
func,
iterable,
result._lazy_task_gen_helper),
task_generation(result._job, func, iterable),
result._set_length,
)
)
return result
else:
if chunksize < 1:
raise ValueError(
"Chunksize must be 1+, not {0!r}".format(chunksize))
task_batches = Pool._get_tasks(func, iterable, chunksize)
result = IMapUnorderedIterator(self, buffersize)
self._taskqueue.put(
(
self._guarded_task_generation_lazy(result._job,
mapstar,
task_batches,
result._lazy_task_gen_helper),
task_generation(result._job, mapstar, task_batches),
result._set_length,
)
)
Expand Down Expand Up @@ -889,7 +883,13 @@ def __init__(self, pool, buffersize):
self._length = None
self._unsorted = {}
self._cache[self._job] = self
self._lazy_task_gen_helper = _LazyTaskGenHelper(buffersize, self._cond)

if buffersize is None:
self._backpressure_sema = None
else:
self._backpressure_sema = threading.Semaphore(
value=self._pool._processes + buffersize
)

def __iter__(self):
return self
Expand All @@ -910,7 +910,9 @@ def next(self, timeout=None):
self._pool = None
raise StopIteration from None
raise TimeoutError from None
self._lazy_task_gen_helper.tasks_finished += 1

if self._backpressure_sema:
self._backpressure_sema.release()

success, value = item
if success:
Expand Down Expand Up @@ -959,22 +961,6 @@ def _set(self, i, obj):
del self._cache[self._job]
self._pool = None

#
# Class to store stats for lazy task generation and share them
# between the main thread and `_guarded_task_generation()` thread.
#
class _LazyTaskGenHelper(object):
def __init__(self, buffersize, iterator_cond):
self.feature_enabled = buffersize is not None
self.buffersize = buffersize
self.tasks_generated = 0
self.tasks_finished = 0
self.iterator_cond = iterator_cond

@property
def not_finished_tasks(self):
return self.tasks_generated - self.tasks_finished

#
#
#
Expand Down