From c1d944ece0eaeb5b1ea030f22a0e259d8fd915fd Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Tue, 27 Feb 2024 13:20:51 +0000 Subject: [PATCH 01/97] Remove control queue (#1210) Co-authored-by: David Brochart Co-authored-by: Steven Silvester --- ipykernel/inprocess/ipkernel.py | 3 -- ipykernel/kernelbase.py | 58 +++++++++------------------------ pyproject.toml | 6 ++-- tests/test_ipkernel_direct.py | 12 ------- tests/test_kernel.py | 42 ++++++++++++++++++++++++ 5 files changed, 61 insertions(+), 60 deletions(-) diff --git a/ipykernel/inprocess/ipkernel.py b/ipykernel/inprocess/ipkernel.py index 873b96d20..7af64aedd 100644 --- a/ipykernel/inprocess/ipkernel.py +++ b/ipykernel/inprocess/ipkernel.py @@ -88,9 +88,6 @@ def start(self): def _abort_queues(self): """The in-process kernel doesn't abort requests.""" - async def _flush_control_queue(self): - """No need to flush control queues for in-process""" - def _input_request(self, prompt, ident, parent, password=False): # Flush output before making the request. 
self.raw_input_str = None diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index a24e32380..01539fd22 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -5,7 +5,6 @@ from __future__ import annotations import asyncio -import concurrent.futures import inspect import itertools import logging @@ -289,49 +288,16 @@ def __init__(self, **kwargs): for msg_type in self.control_msg_types: self.control_handlers[msg_type] = getattr(self, msg_type) - self.control_queue: Queue[t.Any] = Queue() - # Storing the accepted parameters for do_execute, used in execute_request self._do_exec_accepted_params = _accepts_parameters( self.do_execute, ["cell_meta", "cell_id"] ) - def dispatch_control(self, msg): - self.control_queue.put_nowait(msg) - - async def poll_control_queue(self): - while True: - msg = await self.control_queue.get() - # handle tracers from _flush_control_queue - if isinstance(msg, (concurrent.futures.Future, asyncio.Future)): - msg.set_result(None) - continue + async def dispatch_control(self, msg): + # Ensure only one control message is processed at a time + async with asyncio.Lock(): await self.process_control(msg) - async def _flush_control_queue(self): - """Flush the control queue, wait for processing of any pending messages""" - tracer_future: concurrent.futures.Future[object] | asyncio.Future[object] - if self.control_thread: - control_loop = self.control_thread.io_loop - # concurrent.futures.Futures are threadsafe - # and can be used to await across threads - tracer_future = concurrent.futures.Future() - awaitable_future = asyncio.wrap_future(tracer_future) - else: - control_loop = self.io_loop - tracer_future = awaitable_future = asyncio.Future() - - def _flush(): - # control_stream.flush puts messages on the queue - if self.control_stream: - self.control_stream.flush() - # put Future on the queue after all of those, - # so we can wait for all queued messages to be processed - self.control_queue.put(tracer_future) - - 
control_loop.add_callback(_flush) - return awaitable_future - async def process_control(self, msg): """dispatch control requests""" if not self.session: @@ -387,8 +353,6 @@ async def dispatch_shell(self, msg): """dispatch shell requests""" if not self.session: return - # flush control queue before handling shell requests - await self._flush_control_queue() idents, msg = self.session.feed_identities(msg, copy=False) try: @@ -531,6 +495,19 @@ async def process_one(self, wait=True): t, dispatch, args = self.msg_queue.get_nowait() except (asyncio.QueueEmpty, QueueEmpty): return + + if self.control_thread is None and self.control_stream is not None: + # If there isn't a separate control thread then this main thread handles both shell + # and control messages. Before processing a shell message we need to flush all control + # messages and allow them all to be processed. + await asyncio.sleep(0) + self.control_stream.flush() + + socket = self.control_stream.socket + while socket.poll(1): + await asyncio.sleep(0) + self.control_stream.flush() + await dispatch(*args) async def dispatch_queue(self): @@ -578,9 +555,6 @@ def start(self): if self.control_stream: self.control_stream.on_recv(self.dispatch_control, copy=False) - control_loop = self.control_thread.io_loop if self.control_thread else self.io_loop - - asyncio.run_coroutine_threadsafe(self.poll_control_queue(), control_loop.asyncio_loop) if self.shell_stream: self.shell_stream.on_recv( partial( diff --git a/pyproject.toml b/pyproject.toml index c2ed3fc48..1bd260c27 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -24,14 +24,14 @@ dependencies = [ "ipython>=7.23.1", "comm>=0.1.1", "traitlets>=5.4.0", - "jupyter_client>=6.1.12", + "jupyter_client>=8.0.0", "jupyter_core>=4.12,!=5.0.*", # For tk event loop support only. 
"nest_asyncio", - "tornado>=6.1", + "tornado>=6.2", "matplotlib-inline>=0.1", 'appnope;platform_system=="Darwin"', - "pyzmq>=24", + "pyzmq>=25", "psutil", "packaging", ] diff --git a/tests/test_ipkernel_direct.py b/tests/test_ipkernel_direct.py index c9201348c..037489f34 100644 --- a/tests/test_ipkernel_direct.py +++ b/tests/test_ipkernel_direct.py @@ -164,41 +164,29 @@ def test_dispatch_debugpy(ipkernel: IPythonKernel) -> None: async def test_start(ipkernel: IPythonKernel) -> None: shell_future: asyncio.Future = asyncio.Future() - control_future: asyncio.Future = asyncio.Future() async def fake_dispatch_queue(): shell_future.set_result(None) - async def fake_poll_control_queue(): - control_future.set_result(None) - ipkernel.dispatch_queue = fake_dispatch_queue # type:ignore - ipkernel.poll_control_queue = fake_poll_control_queue # type:ignore ipkernel.start() ipkernel.debugpy_stream = None ipkernel.start() await ipkernel.process_one(False) await shell_future - await control_future async def test_start_no_debugpy(ipkernel: IPythonKernel) -> None: shell_future: asyncio.Future = asyncio.Future() - control_future: asyncio.Future = asyncio.Future() async def fake_dispatch_queue(): shell_future.set_result(None) - async def fake_poll_control_queue(): - control_future.set_result(None) - ipkernel.dispatch_queue = fake_dispatch_queue # type:ignore - ipkernel.poll_control_queue = fake_poll_control_queue # type:ignore ipkernel.debugpy_stream = None ipkernel.start() await shell_future - await control_future def test_create_comm(): diff --git a/tests/test_kernel.py b/tests/test_kernel.py index 313388965..a0bd8334e 100644 --- a/tests/test_kernel.py +++ b/tests/test_kernel.py @@ -10,6 +10,7 @@ import subprocess import sys import time +from datetime import datetime, timedelta from subprocess import Popen from tempfile import TemporaryDirectory @@ -597,6 +598,47 @@ def test_control_thread_priority(): assert control_dates[-1] <= shell_dates[0] +def 
test_sequential_control_messages(): + with new_kernel() as kc: + msg_id = kc.execute("import time") + get_reply(kc, msg_id) + + # Send multiple messages on the control channel. + # Using execute messages to vary duration. + sleeps = [0.6, 0.3, 0.1] + + # Prepare messages + msgs = [ + kc.session.msg("execute_request", {"code": f"time.sleep({sleep})"}) for sleep in sleeps + ] + msg_ids = [msg["header"]["msg_id"] for msg in msgs] + + # Submit messages + for msg in msgs: + kc.control_channel.send(msg) + + # Get replies + replies = [get_reply(kc, msg_id, channel="control") for msg_id in msg_ids] + + # Check messages are processed in order, one at a time, and of a sensible duration. + previous_end = None + for reply, sleep in zip(replies, sleeps): + start_str = reply["metadata"]["started"] + if sys.version_info[:2] < (3, 11) and start_str.endswith("Z"): + # Python < 3.11 doesn't support "Z" suffix in datetime.fromisoformat, + # so use alternative timezone format. + # https://github.com/python/cpython/issues/80010 + start_str = start_str[:-1] + "+00:00" + start = datetime.fromisoformat(start_str) + end = reply["header"]["date"] # Already a datetime + + if previous_end is not None: + assert start > previous_end + previous_end = end + + assert end >= start + timedelta(seconds=sleep) + + def _child(): print("in child", os.getpid()) From d6474e9420c9601f3a307ee899ddd7efb74b9431 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 29 Feb 2024 09:34:18 -0600 Subject: [PATCH 02/97] Fix side effect import for pickleutil (#1217) --- ipykernel/pickleutil.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ipykernel/pickleutil.py b/ipykernel/pickleutil.py index c6a2e4484..6f1565943 100644 --- a/ipykernel/pickleutil.py +++ b/ipykernel/pickleutil.py @@ -10,6 +10,10 @@ from types import FunctionType # This registers a hook when it's imported +try: + from ipyparallel.serialize import codeutil # noqa: F401 +except ImportError: + pass from traitlets.log import get_logger from 
traitlets.utils.importstring import import_item From cdc988a894bcfaabc8369b8ae9c945e1061f748d Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Mon, 4 Mar 2024 16:01:07 +0000 Subject: [PATCH 03/97] Allow datetime or str in test_sequential_control_messages (#1219) Co-authored-by: Steven Silvester --- pyproject.toml | 2 +- tests/test_kernel.py | 23 ++++++++++++++--------- 2 files changed, 15 insertions(+), 10 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1bd260c27..3f91deb2e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ docs = [ "trio" ] test = [ - "pytest>=7.0", + "pytest>=7.0,<8", "pytest-cov", "flaky", "ipyparallel", diff --git a/tests/test_kernel.py b/tests/test_kernel.py index a0bd8334e..88f02ae9a 100644 --- a/tests/test_kernel.py +++ b/tests/test_kernel.py @@ -620,20 +620,25 @@ def test_sequential_control_messages(): # Get replies replies = [get_reply(kc, msg_id, channel="control") for msg_id in msg_ids] + def ensure_datetime(arg): + # Support arg which is a datetime or str. + if isinstance(arg, str): + if sys.version_info[:2] < (3, 11) and arg.endswith("Z"): + # Python < 3.11 doesn't support "Z" suffix in datetime.fromisoformat, + # so use alternative timezone format. + # https://github.com/python/cpython/issues/80010 + arg = arg[:-1] + "+00:00" + return datetime.fromisoformat(arg) + return arg + # Check messages are processed in order, one at a time, and of a sensible duration. previous_end = None for reply, sleep in zip(replies, sleeps): - start_str = reply["metadata"]["started"] - if sys.version_info[:2] < (3, 11) and start_str.endswith("Z"): - # Python < 3.11 doesn't support "Z" suffix in datetime.fromisoformat, - # so use alternative timezone format. 
- # https://github.com/python/cpython/issues/80010 - start_str = start_str[:-1] + "+00:00" - start = datetime.fromisoformat(start_str) - end = reply["header"]["date"] # Already a datetime + start = ensure_datetime(reply["metadata"]["started"]) + end = ensure_datetime(reply["header"]["date"]) if previous_end is not None: - assert start > previous_end + assert start >= previous_end previous_end = end assert end >= start + timedelta(seconds=sleep) From 830829fd344849c69027a7a198f745e7b84bcb28 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Tue, 12 Mar 2024 07:17:52 -0500 Subject: [PATCH 04/97] Update Release Scripts (#1221) --- .github/workflows/prep-release.yml | 9 ++++++- .github/workflows/publish-changelog.yml | 34 +++++++++++++++++++++++++ .github/workflows/publish-release.yml | 22 ++++++++-------- 3 files changed, 54 insertions(+), 11 deletions(-) create mode 100644 .github/workflows/publish-changelog.yml diff --git a/.github/workflows/prep-release.yml b/.github/workflows/prep-release.yml index 7a2a18de7..396330bb9 100644 --- a/.github/workflows/prep-release.yml +++ b/.github/workflows/prep-release.yml @@ -12,6 +12,10 @@ on: post_version_spec: description: "Post Version Specifier" required: false + silent: + description: "Set a placeholder in the changelog and don't publish the release." 
+ required: false + type: boolean since: description: "Use PRs with activity since this date or git reference" required: false @@ -22,6 +26,8 @@ on: jobs: prep_release: runs-on: ubuntu-latest + permissions: + contents: write steps: - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 @@ -29,8 +35,9 @@ jobs: id: prep-release uses: jupyter-server/jupyter_releaser/.github/actions/prep-release@v2 with: - token: ${{ secrets.ADMIN_GITHUB_TOKEN }} + token: ${{ secrets.GITHUB_TOKEN }} version_spec: ${{ github.event.inputs.version_spec }} + silent: ${{ github.event.inputs.silent }} post_version_spec: ${{ github.event.inputs.post_version_spec }} target: ${{ github.event.inputs.target }} branch: ${{ github.event.inputs.branch }} diff --git a/.github/workflows/publish-changelog.yml b/.github/workflows/publish-changelog.yml new file mode 100644 index 000000000..60af4c5f1 --- /dev/null +++ b/.github/workflows/publish-changelog.yml @@ -0,0 +1,34 @@ +name: "Publish Changelog" +on: + release: + types: [published] + + workflow_dispatch: + inputs: + branch: + description: "The branch to target" + required: false + +jobs: + publish_changelog: + runs-on: ubuntu-latest + environment: release + steps: + - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 + + - uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: ${{ vars.APP_ID }} + private-key: ${{ secrets.APP_PRIVATE_KEY }} + + - name: Publish changelog + id: publish-changelog + uses: jupyter-server/jupyter_releaser/.github/actions/publish-changelog@v2 + with: + token: ${{ steps.app-token.outputs.token }} + branch: ${{ github.event.inputs.branch }} + + - name: "** Next Step **" + run: | + echo "Merge the changelog update PR: ${{ steps.publish-changelog.outputs.pr_url }}" diff --git a/.github/workflows/publish-release.yml b/.github/workflows/publish-release.yml index dbaaeaad2..5295e776b 100644 --- a/.github/workflows/publish-release.yml +++ b/.github/workflows/publish-release.yml @@ 
-15,30 +15,32 @@ on: jobs: publish_release: runs-on: ubuntu-latest + environment: release + permissions: + id-token: write steps: - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 + - uses: actions/create-github-app-token@v1 + id: app-token + with: + app-id: ${{ vars.APP_ID }} + private-key: ${{ secrets.APP_PRIVATE_KEY }} + - name: Populate Release id: populate-release uses: jupyter-server/jupyter_releaser/.github/actions/populate-release@v2 with: - token: ${{ secrets.ADMIN_GITHUB_TOKEN }} - target: ${{ github.event.inputs.target }} + token: ${{ steps.app-token.outputs.token }} branch: ${{ github.event.inputs.branch }} release_url: ${{ github.event.inputs.release_url }} steps_to_skip: ${{ github.event.inputs.steps_to_skip }} - name: Finalize Release id: finalize-release - env: - PYPI_TOKEN: ${{ secrets.PYPI_TOKEN }} - PYPI_TOKEN_MAP: ${{ secrets.PYPI_TOKEN_MAP }} - TWINE_USERNAME: __token__ - NPM_TOKEN: ${{ secrets.NPM_TOKEN }} - uses: jupyter-server/jupyter-releaser/.github/actions/finalize-release@v2 + uses: jupyter-server/jupyter_releaser/.github/actions/finalize-release@v2 with: - token: ${{ secrets.ADMIN_GITHUB_TOKEN }} - target: ${{ github.event.inputs.target }} + token: ${{ steps.app-token.outputs.token }} release_url: ${{ steps.populate-release.outputs.release_url }} - name: "** Next Step **" From 772dfb8ab87b524bcc6f34dd1f631cf05126e25c Mon Sep 17 00:00:00 2001 From: David Brochart Date: Fri, 22 Mar 2024 18:26:08 +0100 Subject: [PATCH 05/97] Replace Tornado with AnyIO (#1079) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Steven Silvester Co-authored-by: Steven Silvester --- .github/workflows/ci.yml | 2 +- docs/api/ipykernel.inprocess.rst | 6 + examples/embedding/inprocess_terminal.py | 40 +- ipykernel/control.py | 24 +- ipykernel/debugger.py | 66 +-- ipykernel/eventloops.py | 9 +- ipykernel/inprocess/blocking.py | 5 +- ipykernel/inprocess/client.py | 48 +-- 
ipykernel/inprocess/ipkernel.py | 31 +- ipykernel/inprocess/manager.py | 26 +- ipykernel/inprocess/session.py | 41 ++ ipykernel/inprocess/socket.py | 48 ++- ipykernel/iostream.py | 156 +++++--- ipykernel/ipkernel.py | 216 +++++----- ipykernel/kernelapp.py | 59 ++- ipykernel/kernelbase.py | 488 +++++++++-------------- ipykernel/zmqshell.py | 11 +- pyproject.toml | 20 +- tests/conftest.py | 102 +++-- tests/inprocess/test_kernel.py | 44 +- tests/inprocess/test_kernelmanager.py | 121 +++--- tests/test_async.py | 21 +- tests/test_embed_kernel.py | 2 +- tests/test_eventloop.py | 2 +- tests/test_io.py | 47 ++- tests/test_ipkernel_direct.py | 12 +- tests/test_kernel_direct.py | 24 +- tests/test_kernelapp.py | 15 +- tests/test_message_spec.py | 17 +- tests/test_pickleutil.py | 6 + 30 files changed, 878 insertions(+), 831 deletions(-) create mode 100644 ipykernel/inprocess/session.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e6d7e864a..e3b56ba25 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -84,8 +84,8 @@ jobs: - name: Run Linters run: | hatch run typing:test + pipx run interrogate -vv . --fail-under 90 hatch run lint:build - pipx run interrogate -vv . pipx run doc8 --max-line-length=200 check_release: diff --git a/docs/api/ipykernel.inprocess.rst b/docs/api/ipykernel.inprocess.rst index c2d6536bc..344561023 100644 --- a/docs/api/ipykernel.inprocess.rst +++ b/docs/api/ipykernel.inprocess.rst @@ -41,6 +41,12 @@ Submodules :show-inheritance: +.. automodule:: ipykernel.inprocess.session + :members: + :undoc-members: + :show-inheritance: + + .. 
automodule:: ipykernel.inprocess.socket :members: :undoc-members: diff --git a/examples/embedding/inprocess_terminal.py b/examples/embedding/inprocess_terminal.py index b644c94af..c951859e8 100644 --- a/examples/embedding/inprocess_terminal.py +++ b/examples/embedding/inprocess_terminal.py @@ -1,8 +1,7 @@ """An in-process terminal example.""" import os -import sys -import tornado +from anyio import run from jupyter_console.ptshell import ZMQTerminalInteractiveShell from ipykernel.inprocess.manager import InProcessKernelManager @@ -13,46 +12,15 @@ def print_process_id(): print("Process ID is:", os.getpid()) -def init_asyncio_patch(): - """set default asyncio policy to be compatible with tornado - Tornado 6 (at least) is not compatible with the default - asyncio implementation on Windows - Pick the older SelectorEventLoopPolicy on Windows - if the known-incompatible default policy is in use. - do this as early as possible to make it a low priority and overridable - ref: https://github.com/tornadoweb/tornado/issues/2608 - FIXME: if/when tornado supports the defaults in asyncio, - remove and bump tornado requirement for py38 - """ - if ( - sys.platform.startswith("win") - and sys.version_info >= (3, 8) - and tornado.version_info < (6, 1) - ): - import asyncio - - try: - from asyncio import WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy - except ImportError: - pass - # not affected - else: - if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: - # WindowsProactorEventLoopPolicy is not compatible with tornado 6 - # fallback to the pre-3.8 default of Selector - asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) - - -def main(): +async def main(): """The main function.""" print_process_id() # Create an in-process kernel # >>> print_process_id() # will print the same process ID as the main process - init_asyncio_patch() kernel_manager = InProcessKernelManager() - kernel_manager.start_kernel() + await 
kernel_manager.start_kernel() kernel = kernel_manager.kernel kernel.gui = "qt4" kernel.shell.push({"foo": 43, "print_process_id": print_process_id}) @@ -64,4 +32,4 @@ def main(): if __name__ == "__main__": - main() + run(main) diff --git a/ipykernel/control.py b/ipykernel/control.py index 0ee0fad05..a70377c03 100644 --- a/ipykernel/control.py +++ b/ipykernel/control.py @@ -1,7 +1,7 @@ """A thread for a control channel.""" -from threading import Thread +from threading import Event, Thread -from tornado.ioloop import IOLoop +from anyio import create_task_group, run, to_thread CONTROL_THREAD_NAME = "Control" @@ -12,21 +12,29 @@ class ControlThread(Thread): def __init__(self, **kwargs): """Initialize the thread.""" Thread.__init__(self, name=CONTROL_THREAD_NAME, **kwargs) - self.io_loop = IOLoop(make_current=False) self.pydev_do_not_trace = True self.is_pydev_daemon_thread = True + self.__stop = Event() + self._task = None + + def set_task(self, task): + self._task = task def run(self): """Run the thread.""" self.name = CONTROL_THREAD_NAME - try: - self.io_loop.start() - finally: - self.io_loop.close() + run(self._main) + + async def _main(self): + async with create_task_group() as tg: + if self._task is not None: + tg.start_soon(self._task) + await to_thread.run_sync(self.__stop.wait) + tg.cancel_scope.cancel() def stop(self): """Stop the thread. This method is threadsafe. 
""" - self.io_loop.add_callback(self.io_loop.stop) + self.__stop.set() diff --git a/ipykernel/debugger.py b/ipykernel/debugger.py index fd192e157..8680793fd 100644 --- a/ipykernel/debugger.py +++ b/ipykernel/debugger.py @@ -3,13 +3,13 @@ import re import sys import typing as t +from math import inf from pathlib import Path import zmq +from anyio import Event, create_memory_object_stream from IPython.core.getipython import get_ipython from IPython.core.inputtransformer2 import leading_empty_lines -from tornado.locks import Event -from tornado.queues import Queue from zmq.utils import jsonapi try: @@ -117,7 +117,9 @@ def __init__(self, event_callback, log): self.tcp_buffer = "" self._reset_tcp_pos() self.event_callback = event_callback - self.message_queue: Queue[t.Any] = Queue() + self.message_send_stream, self.message_receive_stream = create_memory_object_stream[dict]( + max_buffer_size=inf + ) self.log = log def _reset_tcp_pos(self): @@ -136,7 +138,7 @@ def _put_message(self, raw_msg): else: self.log.debug("QUEUE - put message:") self.log.debug(msg) - self.message_queue.put_nowait(msg) + self.message_send_stream.send_nowait(msg) def put_tcp_frame(self, frame): """Put a tcp frame in the queue.""" @@ -187,25 +189,31 @@ def put_tcp_frame(self, frame): async def get_message(self): """Get a message from the queue.""" - return await self.message_queue.get() + return await self.message_receive_stream.receive() class DebugpyClient: """A client for debugpy.""" - def __init__(self, log, debugpy_stream, event_callback): + def __init__(self, log, debugpy_socket, event_callback): """Initialize the client.""" self.log = log - self.debugpy_stream = debugpy_stream + self.debugpy_socket = debugpy_socket self.event_callback = event_callback self.message_queue = DebugpyMessageQueue(self._forward_event, self.log) self.debugpy_host = "127.0.0.1" self.debugpy_port = -1 self.routing_id = None self.wait_for_attach = True - self.init_event = Event() + self._init_event = None 
self.init_event_seq = -1 + @property + def init_event(self): + if self._init_event is None: + self._init_event = Event() + return self._init_event + def _get_endpoint(self): host, port = self.get_host_port() return "tcp://" + host + ":" + str(port) @@ -216,9 +224,9 @@ def _forward_event(self, msg): self.init_event_seq = msg["seq"] self.event_callback(msg) - def _send_request(self, msg): + async def _send_request(self, msg): if self.routing_id is None: - self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID) + self.routing_id = self.debugpy_socket.getsockopt(ROUTING_ID) content = jsonapi.dumps( msg, default=json_default, @@ -233,7 +241,7 @@ def _send_request(self, msg): self.log.debug("DEBUGPYCLIENT:") self.log.debug(self.routing_id) self.log.debug(buf) - self.debugpy_stream.send_multipart((self.routing_id, buf)) + await self.debugpy_socket.send_multipart((self.routing_id, buf)) async def _wait_for_response(self): # Since events are never pushed to the message_queue @@ -251,7 +259,7 @@ async def _handle_init_sequence(self): "seq": int(self.init_event_seq) + 1, "command": "configurationDone", } - self._send_request(configurationDone) + await self._send_request(configurationDone) # 3] Waits for configurationDone response await self._wait_for_response() @@ -262,7 +270,7 @@ async def _handle_init_sequence(self): def get_host_port(self): """Get the host debugpy port.""" if self.debugpy_port == -1: - socket = self.debugpy_stream.socket + socket = self.debugpy_socket socket.bind_to_random_port("tcp://" + self.debugpy_host) self.endpoint = socket.getsockopt(zmq.LAST_ENDPOINT).decode("utf-8") socket.unbind(self.endpoint) @@ -272,14 +280,13 @@ def get_host_port(self): def connect_tcp_socket(self): """Connect to the tcp socket.""" - self.debugpy_stream.socket.connect(self._get_endpoint()) - self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID) + self.debugpy_socket.connect(self._get_endpoint()) + self.routing_id = 
self.debugpy_socket.getsockopt(ROUTING_ID) def disconnect_tcp_socket(self): """Disconnect from the tcp socket.""" - self.debugpy_stream.socket.disconnect(self._get_endpoint()) + self.debugpy_socket.disconnect(self._get_endpoint()) self.routing_id = None - self.init_event = Event() self.init_event_seq = -1 self.wait_for_attach = True @@ -289,7 +296,7 @@ def receive_dap_frame(self, frame): async def send_dap_request(self, msg): """Send a dap request.""" - self._send_request(msg) + await self._send_request(msg) if self.wait_for_attach and msg["command"] == "attach": rep = await self._handle_init_sequence() self.wait_for_attach = False @@ -325,17 +332,19 @@ class Debugger: ] def __init__( - self, log, debugpy_stream, event_callback, shell_socket, session, just_my_code=True + self, log, debugpy_socket, event_callback, shell_socket, session, just_my_code=True ): """Initialize the debugger.""" self.log = log - self.debugpy_client = DebugpyClient(log, debugpy_stream, self._handle_event) + self.debugpy_client = DebugpyClient(log, debugpy_socket, self._handle_event) self.shell_socket = shell_socket self.session = session self.is_started = False self.event_callback = event_callback self.just_my_code = just_my_code - self.stopped_queue: Queue[t.Any] = Queue() + self.stopped_send_stream, self.stopped_receive_stream = create_memory_object_stream[dict]( + max_buffer_size=inf + ) self.started_debug_handlers = {} for msg_type in Debugger.started_debug_msg_types: @@ -360,7 +369,7 @@ def __init__( def _handle_event(self, msg): if msg["event"] == "stopped": if msg["body"]["allThreadsStopped"]: - self.stopped_queue.put_nowait(msg) + self.stopped_send_stream.send_nowait(msg) # Do not forward the event now, will be done in the handle_stopped_event return self.stopped_threads.add(msg["body"]["threadId"]) @@ -398,7 +407,7 @@ async def handle_stopped_event(self): """Handle a stopped event.""" # Wait for a stopped event message in the stopped queue # This message is used for triggering the 
'threads' request - event = await self.stopped_queue.get() + event = await self.stopped_receive_stream.receive() req = {"seq": event["seq"] + 1, "type": "request", "command": "threads"} rep = await self._forward_message(req) for thread in rep["body"]["threads"]: @@ -410,7 +419,7 @@ async def handle_stopped_event(self): def tcp_client(self): return self.debugpy_client - def start(self): + async def start(self): """Start the debugger.""" if not self.debugpy_initialized: tmp_dir = get_tmp_directory() @@ -428,7 +437,12 @@ def start(self): (self.shell_socket.getsockopt(ROUTING_ID)), ) - ident, msg = self.session.recv(self.shell_socket, mode=0) + msg = await self.shell_socket.recv_multipart() + ident, msg = self.session.feed_identities(msg, copy=True) + try: + msg = self.session.deserialize(msg, content=True, copy=True) + except Exception: + self.log.error("Invalid message", exc_info=True) # noqa: G201 self.debugpy_initialized = msg["content"]["status"] == "ok" # Don't remove leading empty lines when debugging so the breakpoints are correctly positioned @@ -714,7 +728,7 @@ async def process_request(self, message): if self.is_started: self.log.info("The debugger has already started") else: - self.is_started = self.start() + self.is_started = await self.start() if self.is_started: self.log.info("The debugger has started") else: diff --git a/ipykernel/eventloops.py b/ipykernel/eventloops.py index 853738d9e..4c3a18cbc 100644 --- a/ipykernel/eventloops.py +++ b/ipykernel/eventloops.py @@ -415,13 +415,12 @@ def loop_asyncio(kernel): loop._should_close = False # type:ignore[attr-defined] # pause eventloop when there's an event on a zmq socket - def process_stream_events(stream): + def process_stream_events(socket): """fall back to main loop when there's a socket event""" - if stream.flush(limit=1): - loop.stop() + loop.stop() - notifier = partial(process_stream_events, kernel.shell_stream) - loop.add_reader(kernel.shell_stream.getsockopt(zmq.FD), notifier) + notifier = 
partial(process_stream_events, kernel.shell_socket) + loop.add_reader(kernel.shell_socket.getsockopt(zmq.FD), notifier) loop.call_soon(notifier) while True: diff --git a/ipykernel/inprocess/blocking.py b/ipykernel/inprocess/blocking.py index c598a44b4..b5c421a79 100644 --- a/ipykernel/inprocess/blocking.py +++ b/ipykernel/inprocess/blocking.py @@ -80,10 +80,10 @@ class BlockingInProcessKernelClient(InProcessKernelClient): iopub_channel_class = Type(BlockingInProcessChannel) # type:ignore[arg-type] stdin_channel_class = Type(BlockingInProcessStdInChannel) # type:ignore[arg-type] - def wait_for_ready(self): + async def wait_for_ready(self): """Wait for kernel info reply on shell channel.""" while True: - self.kernel_info() + await self.kernel_info() try: msg = self.shell_channel.get_msg(block=True, timeout=1) except Empty: @@ -103,6 +103,5 @@ def wait_for_ready(self): while True: try: msg = self.iopub_channel.get_msg(block=True, timeout=0.2) - print(msg["msg_type"]) except Empty: break diff --git a/ipykernel/inprocess/client.py b/ipykernel/inprocess/client.py index 6250302d5..8ca97470f 100644 --- a/ipykernel/inprocess/client.py +++ b/ipykernel/inprocess/client.py @@ -11,11 +11,9 @@ # Imports # ----------------------------------------------------------------------------- -import asyncio from jupyter_client.client import KernelClient from jupyter_client.clientabc import KernelClientABC -from jupyter_core.utils import run_sync # IPython imports from traitlets import Instance, Type, default @@ -102,7 +100,7 @@ def hb_channel(self): # Methods for sending specific messages # ------------------------------------- - def execute( + async def execute( self, code, silent=False, store_history=True, user_expressions=None, allow_stdin=None ): """Execute code on the client.""" @@ -116,19 +114,19 @@ def execute( allow_stdin=allow_stdin, ) msg = self.session.msg("execute_request", content) - self._dispatch_to_kernel(msg) + await self._dispatch_to_kernel(msg) return 
msg["header"]["msg_id"] - def complete(self, code, cursor_pos=None): + async def complete(self, code, cursor_pos=None): """Get code completion.""" if cursor_pos is None: cursor_pos = len(code) content = dict(code=code, cursor_pos=cursor_pos) msg = self.session.msg("complete_request", content) - self._dispatch_to_kernel(msg) + await self._dispatch_to_kernel(msg) return msg["header"]["msg_id"] - def inspect(self, code, cursor_pos=None, detail_level=0): + async def inspect(self, code, cursor_pos=None, detail_level=0): """Get code inspection.""" if cursor_pos is None: cursor_pos = len(code) @@ -138,14 +136,14 @@ def inspect(self, code, cursor_pos=None, detail_level=0): detail_level=detail_level, ) msg = self.session.msg("inspect_request", content) - self._dispatch_to_kernel(msg) + await self._dispatch_to_kernel(msg) return msg["header"]["msg_id"] - def history(self, raw=True, output=False, hist_access_type="range", **kwds): + async def history(self, raw=True, output=False, hist_access_type="range", **kwds): """Get code history.""" content = dict(raw=raw, output=output, hist_access_type=hist_access_type, **kwds) msg = self.session.msg("history_request", content) - self._dispatch_to_kernel(msg) + await self._dispatch_to_kernel(msg) return msg["header"]["msg_id"] def shutdown(self, restart=False): @@ -154,17 +152,17 @@ def shutdown(self, restart=False): msg = "Cannot shutdown in-process kernel" raise NotImplementedError(msg) - def kernel_info(self): + async def kernel_info(self): """Request kernel info.""" msg = self.session.msg("kernel_info_request") - self._dispatch_to_kernel(msg) + await self._dispatch_to_kernel(msg) return msg["header"]["msg_id"] - def comm_info(self, target_name=None): + async def comm_info(self, target_name=None): """Request a dictionary of valid comms and their targets.""" content = {} if target_name is None else dict(target_name=target_name) msg = self.session.msg("comm_info_request", content) - self._dispatch_to_kernel(msg) + await 
self._dispatch_to_kernel(msg) return msg["header"]["msg_id"] def input(self, string): @@ -174,29 +172,21 @@ def input(self, string): raise RuntimeError(msg) self.kernel.raw_input_str = string - def is_complete(self, code): + async def is_complete(self, code): """Handle an is_complete request.""" msg = self.session.msg("is_complete_request", {"code": code}) - self._dispatch_to_kernel(msg) + await self._dispatch_to_kernel(msg) return msg["header"]["msg_id"] - def _dispatch_to_kernel(self, msg): + async def _dispatch_to_kernel(self, msg): """Send a message to the kernel and handle a reply.""" kernel = self.kernel if kernel is None: - msg = "Cannot send request. No kernel exists." - raise RuntimeError(msg) + error_message = "Cannot send request. No kernel exists." + raise RuntimeError(error_message) - stream = kernel.shell_stream - self.session.send(stream, msg) - msg_parts = stream.recv_multipart() - if run_sync is not None: - dispatch_shell = run_sync(kernel.dispatch_shell) - dispatch_shell(msg_parts) - else: - loop = asyncio.get_event_loop() # type:ignore[unreachable] - loop.run_until_complete(kernel.dispatch_shell(msg_parts)) - idents, reply_msg = self.session.recv(stream, copy=False) + kernel.shell_socket.put(msg) + reply_msg = await kernel.shell_socket.get() self.shell_channel.call_handlers_later(reply_msg) def get_shell_msg(self, block=True, timeout=None): diff --git a/ipykernel/inprocess/ipkernel.py b/ipykernel/inprocess/ipkernel.py index 7af64aedd..416be5a48 100644 --- a/ipykernel/inprocess/ipkernel.py +++ b/ipykernel/inprocess/ipkernel.py @@ -7,6 +7,8 @@ import sys from contextlib import contextmanager +from anyio import TASK_STATUS_IGNORED +from anyio.abc import TaskStatus from IPython.core.interactiveshell import InteractiveShellABC from traitlets import Any, Enum, Instance, List, Type, default @@ -48,10 +50,10 @@ class InProcessKernel(IPythonKernel): # ------------------------------------------------------------------------- shell_class = 
Type(allow_none=True) # type:ignore[assignment] - _underlying_iopub_socket = Instance(DummySocket, ()) + _underlying_iopub_socket = Instance(DummySocket, (False,)) iopub_thread: IOPubThread = Instance(IOPubThread) # type:ignore[assignment] - shell_stream = Instance(DummySocket, ()) # type:ignore[arg-type] + shell_socket = Instance(DummySocket, (True,)) # type:ignore[arg-type] @default("iopub_thread") def _default_iopub_thread(self): @@ -65,13 +67,13 @@ def _default_iopub_thread(self): def _default_iopub_socket(self): return self.iopub_thread.background_socket - stdin_socket = Instance(DummySocket, ()) # type:ignore[assignment] + stdin_socket = Instance(DummySocket, (False,)) # type:ignore[assignment] def __init__(self, **traits): """Initialize the kernel.""" super().__init__(**traits) - self._underlying_iopub_socket.observe(self._io_dispatch, names=["message_sent"]) + self._io_dispatch() if self.shell: self.shell.kernel = self @@ -80,10 +82,14 @@ async def execute_request(self, stream, ident, parent): with self._redirected_io(): await super().execute_request(stream, ident, parent) - def start(self): + async def start(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None: """Override registration of dispatchers for streams.""" if self.shell: self.shell.exit_now = False + await super().start(task_status=task_status) + + def stop(self): + super().stop() def _abort_queues(self): """The in-process kernel doesn't abort requests.""" @@ -128,14 +134,17 @@ def _redirected_io(self): # ------ Trait change handlers -------------------------------------------- - def _io_dispatch(self, change): + def _io_dispatch(self): """Called when a message is sent to the IO socket.""" assert self.iopub_socket.io_thread is not None assert self.session is not None - ident, msg = self.session.recv(self.iopub_socket.io_thread.socket, copy=False) - for frontend in self.frontends: - assert frontend is not None - frontend.iopub_channel.call_handlers(msg) + + def callback(msg): + for 
frontend in self.frontends: + assert frontend is not None + frontend.iopub_channel.call_handlers(msg) + + self.iopub_thread.socket.on_recv = callback # ------ Trait initializers ----------------------------------------------- @@ -145,7 +154,7 @@ def _default_log(self): @default("session") def _default_session(self): - from jupyter_client.session import Session + from .session import Session return Session(parent=self, key=INPROCESS_KEY) diff --git a/ipykernel/inprocess/manager.py b/ipykernel/inprocess/manager.py index 3a3f92c37..9f0fcc758 100644 --- a/ipykernel/inprocess/manager.py +++ b/ipykernel/inprocess/manager.py @@ -3,12 +3,16 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. +from typing import Any + +from anyio import TASK_STATUS_IGNORED +from anyio.abc import TaskStatus from jupyter_client.manager import KernelManager from jupyter_client.managerabc import KernelManagerABC -from jupyter_client.session import Session from traitlets import DottedObjectName, Instance, default from .constants import INPROCESS_KEY +from .session import Session class InProcessKernelManager(KernelManager): @@ -41,11 +45,14 @@ def _default_session(self): # Kernel management methods # -------------------------------------------------------------------------- - def start_kernel(self, **kwds): + async def start_kernel( # type: ignore[explicit-override, override] + self, *, task_status: TaskStatus = TASK_STATUS_IGNORED, **kwds: Any + ) -> None: """Start the kernel.""" from ipykernel.inprocess.ipkernel import InProcessKernel self.kernel = InProcessKernel(parent=self, session=self.session) + await self.kernel.start(task_status=task_status) def shutdown_kernel(self): """Shutdown the kernel.""" @@ -53,17 +60,26 @@ def shutdown_kernel(self): self.kernel.iopub_thread.stop() self._kill_kernel() - def restart_kernel(self, now=False, **kwds): + async def restart_kernel( # type: ignore[explicit-override, override] + self, + now: bool = False, 
+ newports: bool = False, + *, + task_status: TaskStatus = TASK_STATUS_IGNORED, + **kw: Any, + ) -> None: """Restart the kernel.""" self.shutdown_kernel() - self.start_kernel(**kwds) + await self.start_kernel(task_status=task_status, **kw) @property def has_kernel(self): return self.kernel is not None def _kill_kernel(self): - self.kernel = None + if self.kernel: + self.kernel.stop() + self.kernel = None def interrupt_kernel(self): """Interrupt the kernel.""" diff --git a/ipykernel/inprocess/session.py b/ipykernel/inprocess/session.py new file mode 100644 index 000000000..0eaed2c60 --- /dev/null +++ b/ipykernel/inprocess/session.py @@ -0,0 +1,41 @@ +from jupyter_client.session import Session as _Session + + +class Session(_Session): + async def recv(self, socket, copy=True): + return await socket.recv_multipart() + + def send( + self, + socket, + msg_or_type, + content=None, + parent=None, + ident=None, + buffers=None, + track=False, + header=None, + metadata=None, + ): + if isinstance(msg_or_type, str): + msg = self.msg( + msg_or_type, + content=content, + parent=parent, + header=header, + metadata=metadata, + ) + else: + # We got a Message or message dict, not a msg_type so don't + # build a new Message. + msg = msg_or_type + buffers = buffers or msg.get("buffers", []) + + socket.send_multipart(msg) + return msg + + def feed_identities(self, msg, copy=True): + return "", msg + + def deserialize(self, msg, content=True, copy=True): + return msg diff --git a/ipykernel/inprocess/socket.py b/ipykernel/inprocess/socket.py index 2df72b5e1..edc77c286 100644 --- a/ipykernel/inprocess/socket.py +++ b/ipykernel/inprocess/socket.py @@ -3,10 +3,12 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
-from queue import Queue +from math import inf import zmq -from traitlets import HasTraits, Instance, Int +import zmq.asyncio +from anyio import create_memory_object_stream +from traitlets import HasTraits, Instance # ----------------------------------------------------------------------------- # Dummy socket class @@ -14,28 +16,50 @@ class DummySocket(HasTraits): - """A dummy socket implementing (part of) the zmq.Socket interface.""" + """A dummy socket implementing (part of) the zmq.asyncio.Socket interface.""" - queue = Instance(Queue, ()) - message_sent = Int(0) # Should be an Event - context = Instance(zmq.Context) + context = Instance(zmq.asyncio.Context) def _context_default(self): - return zmq.Context() + return zmq.asyncio.Context() # ------------------------------------------------------------------------- # Socket interface # ------------------------------------------------------------------------- - def recv_multipart(self, flags=0, copy=True, track=False): + def __init__(self, is_shell, *args, **kwargs): + super().__init__(*args, **kwargs) + self.is_shell = is_shell + self.on_recv = None + if is_shell: + self.in_send_stream, self.in_receive_stream = create_memory_object_stream[dict]( + max_buffer_size=inf + ) + self.out_send_stream, self.out_receive_stream = create_memory_object_stream[dict]( + max_buffer_size=inf + ) + + def put(self, msg): + self.in_send_stream.send_nowait(msg) + + async def get(self): + return await self.out_receive_stream.receive() + + async def recv_multipart(self, flags=0, copy=True, track=False): """Recv a multipart message.""" - return self.queue.get_nowait() + return await self.in_receive_stream.receive() def send_multipart(self, msg_parts, flags=0, copy=True, track=False): """Send a multipart message.""" - msg_parts = list(map(zmq.Message, msg_parts)) - self.queue.put_nowait(msg_parts) - self.message_sent += 1 + if self.is_shell: + self.out_send_stream.send_nowait(msg_parts) + if self.on_recv is not None: + 
self.on_recv(msg_parts) def flush(self, timeout=1.0): """no-op to comply with stream API""" + + async def poll(self, timeout=0): + assert timeout == 0 + statistics = self.in_receive_stream.statistics() + return statistics.current_buffer_used != 0 diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index 257b5c800..ea70831b8 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -15,13 +15,12 @@ from binascii import b2a_hex from collections import defaultdict, deque from io import StringIO, TextIOBase -from threading import local +from threading import Event, Thread, local from typing import Any, Callable, Deque, Dict, Optional import zmq +from anyio import create_task_group, run, sleep, to_thread from jupyter_client.session import extract_header -from tornado.ioloop import IOLoop -from zmq.eventloop.zmqstream import ZMQStream # ----------------------------------------------------------------------------- # Globals @@ -37,6 +36,38 @@ # ----------------------------------------------------------------------------- +class _IOPubThread(Thread): + """A thread for a IOPub.""" + + def __init__(self, tasks, **kwargs): + """Initialize the thread.""" + Thread.__init__(self, name="IOPub", **kwargs) + self._tasks = tasks + self.pydev_do_not_trace = True + self.is_pydev_daemon_thread = True + self.daemon = True + self.__stop = Event() + + def run(self): + """Run the thread.""" + self.name = "IOPub" + run(self._main) + + async def _main(self): + async with create_task_group() as tg: + for task in self._tasks: + tg.start_soon(task) + await to_thread.run_sync(self.__stop.wait) + tg.cancel_scope.cancel() + + def stop(self): + """Stop the thread. + + This method is threadsafe. + """ + self.__stop.set() + + class IOPubThread: """An object for sending IOPub messages in a background thread @@ -58,11 +89,9 @@ def __init__(self, socket, pipe=False): piped from subprocesses. 
""" self.socket = socket - self._stopped = False self.background_socket = BackgroundSocket(self) self._master_pid = os.getpid() self._pipe_flag = pipe - self.io_loop = IOLoop(make_current=False) if pipe: self._setup_pipe_in() self._local = threading.local() @@ -72,53 +101,25 @@ def __init__(self, socket, pipe=False): self._event_pipe_gc_seconds: float = 10 self._event_pipe_gc_task: Optional[asyncio.Task[Any]] = None self._setup_event_pipe() - self.thread = threading.Thread(target=self._thread_main, name="IOPub") - self.thread.daemon = True - self.thread.pydev_do_not_trace = True # type:ignore[attr-defined] - self.thread.is_pydev_daemon_thread = True # type:ignore[attr-defined] - self.thread.name = "IOPub" - - def _thread_main(self): - """The inner loop that's actually run in a thread""" - - def _start_event_gc(): - self._event_pipe_gc_task = asyncio.ensure_future(self._run_event_pipe_gc()) - - self.io_loop.run_sync(_start_event_gc) - - if not self._stopped: - # avoid race if stop called before start thread gets here - # probably only comes up in tests - self.io_loop.start() - - if self._event_pipe_gc_task is not None: - # cancel gc task to avoid pending task warnings - async def _cancel(): - self._event_pipe_gc_task.cancel() # type:ignore[union-attr] - - if not self._stopped: - self.io_loop.run_sync(_cancel) - else: - self._event_pipe_gc_task.cancel() - - self.io_loop.close(all_fds=True) + tasks = [self._handle_event, self._run_event_pipe_gc] + if pipe: + tasks.append(self._handle_pipe_msgs) + self.thread = _IOPubThread(tasks) def _setup_event_pipe(self): """Create the PULL socket listening for events that should fire in this thread.""" ctx = self.socket.context - pipe_in = ctx.socket(zmq.PULL) - pipe_in.linger = 0 + self._pipe_in0 = ctx.socket(zmq.PULL) + self._pipe_in0.linger = 0 _uuid = b2a_hex(os.urandom(16)).decode("ascii") iface = self._event_interface = "inproc://%s" % _uuid - pipe_in.bind(iface) - self._event_puller = ZMQStream(pipe_in, self.io_loop) - 
self._event_puller.on_recv(self._handle_event) + self._pipe_in0.bind(iface) async def _run_event_pipe_gc(self): """Task to run event pipe gc continuously""" while True: - await asyncio.sleep(self._event_pipe_gc_seconds) + await sleep(self._event_pipe_gc_seconds) try: await self._event_pipe_gc() except Exception as e: @@ -142,7 +143,7 @@ def _event_pipe(self): event_pipe = self._local.event_pipe except AttributeError: # new thread, new event pipe - ctx = self.socket.context + ctx = zmq.Context(self.socket.context) event_pipe = ctx.socket(zmq.PUSH) event_pipe.linger = 0 event_pipe.connect(self._event_interface) @@ -154,7 +155,7 @@ def _event_pipe(self): self._event_pipes[threading.current_thread()] = event_pipe return event_pipe - def _handle_event(self, msg): + async def _handle_event(self): """Handle an event on the event pipe Content of the message is ignored. @@ -162,12 +163,19 @@ def _handle_event(self, msg): Whenever *an* event arrives on the event stream, *all* waiting events are processed in order. 
""" - # freeze event count so new writes don't extend the queue - # while we are processing - n_events = len(self._events) - for _ in range(n_events): - event_f = self._events.popleft() - event_f() + try: + while True: + await self._pipe_in0.recv() + # freeze event count so new writes don't extend the queue + # while we are processing + n_events = len(self._events) + for _ in range(n_events): + event_f = self._events.popleft() + event_f() + except Exception as e: + if self.thread.__stop.is_set(): + return + raise e def _setup_pipe_in(self): """setup listening pipe for IOPub from forked subprocesses""" @@ -176,11 +184,11 @@ def _setup_pipe_in(self): # use UUID to authenticate pipe messages self._pipe_uuid = os.urandom(16) - pipe_in = ctx.socket(zmq.PULL) - pipe_in.linger = 0 + self._pipe_in1 = ctx.socket(zmq.PULL) + self._pipe_in1.linger = 0 try: - self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1") + self._pipe_port = self._pipe_in1.bind_to_random_port("tcp://127.0.0.1") except zmq.ZMQError as e: warnings.warn( "Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e @@ -188,13 +196,22 @@ def _setup_pipe_in(self): stacklevel=2, ) self._pipe_flag = False - pipe_in.close() + self._pipe_in1.close() return - self._pipe_in = ZMQStream(pipe_in, self.io_loop) - self._pipe_in.on_recv(self._handle_pipe_msg) - def _handle_pipe_msg(self, msg): + async def _handle_pipe_msgs(self): + """handle pipe messages from a subprocess""" + try: + while True: + await self._handle_pipe_msg() + except Exception as e: + if self.thread.__stop.is_set(): + return + raise e + + async def _handle_pipe_msg(self, msg=None): """handle a pipe message from a subprocess""" + msg = msg or await self._pipe_in1.recv_multipart() if not self._pipe_flag or not self._is_master_process(): return if msg[0] != self._pipe_uuid: @@ -221,7 +238,6 @@ def _check_mp_mode(self): def start(self): """Start the IOPub thread""" - self.thread.name = "IOPub" self.thread.start() # make sure we don't prevent process exit # 
I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be. @@ -229,10 +245,9 @@ def start(self): def stop(self): """Stop the IOPub thread""" - self._stopped = True if not self.thread.is_alive(): return - self.io_loop.add_callback(self.io_loop.stop) + self.thread.stop() self.thread.join(timeout=30) if self.thread.is_alive(): @@ -249,6 +264,9 @@ def close(self): """Close the IOPub thread.""" if self.closed: return + self._pipe_in0.close() + if self._pipe_flag: + self._pipe_in1.close() self.socket.close() self.socket = None @@ -264,7 +282,11 @@ def schedule(self, f): if self.thread.is_alive(): self._events.append(f) # wake event thread (message content is ignored) - self._event_pipe.send(b"") + try: + self._event_pipe.send(b"") + except RuntimeError: + pass + else: f() @@ -434,6 +456,8 @@ def __init__( ) # This is necessary for compatibility with Python built-in streams self.session = session + self._has_thread = False + self.watch_fd_thread = None if not isinstance(pub_thread, IOPubThread): # Backward-compat: given socket, not thread. Wrap in a thread. warnings.warn( @@ -444,6 +468,7 @@ def __init__( ) pub_thread = IOPubThread(pub_thread) pub_thread.start() + self._has_thread = True self.pub_thread = pub_thread self.name = name self.topic = b"stream." 
+ name.encode() @@ -457,7 +482,6 @@ def __init__( self._master_pid = os.getpid() self._flush_pending = False self._subprocess_flush_pending = False - self._io_loop = pub_thread.io_loop self._buffer_lock = threading.RLock() self._buffers = defaultdict(StringIO) self.echo = None @@ -561,13 +585,16 @@ def close(self): # thread won't wake unless there's something to read # writing something after _should_watch will not be echoed os.write(self._original_stdstream_fd, b"\0") - self.watch_fd_thread.join() + if self.watch_fd_thread is not None: + self.watch_fd_thread.join() # restore original FDs os.dup2(self._original_stdstream_copy, self._original_stdstream_fd) os.close(self._original_stdstream_copy) if self._exc: etype, value, tb = self._exc traceback.print_exception(etype, value, tb) + if self._has_thread: + self.pub_thread.stop() self.pub_thread = None @property @@ -584,10 +611,7 @@ def _schedule_flush(self): self._flush_pending = True # add_timeout has to be handed to the io thread via event pipe - def _schedule_in_thread(): - self._io_loop.call_later(self.flush_interval, self._flush) - - self.pub_thread.schedule(_schedule_in_thread) + self.pub_thread.schedule(self._flush) def flush(self): """trigger actual zmq send diff --git a/ipykernel/ipkernel.py b/ipykernel/ipkernel.py index 9bea4d56b..15242933b 100644 --- a/ipykernel/ipkernel.py +++ b/ipykernel/ipkernel.py @@ -1,23 +1,22 @@ """The IPython kernel implementation""" -import asyncio import builtins import gc import getpass import os -import signal import sys import threading import typing as t -from contextlib import contextmanager -from functools import partial +from dataclasses import dataclass import comm +import zmq.asyncio +from anyio import TASK_STATUS_IGNORED, create_task_group, to_thread +from anyio.abc import TaskStatus from IPython.core import release from IPython.utils.tokenutil import line_at_cursor, token_at_cursor from jupyter_client.session import extract_header from traitlets import Any, Bool, 
HasTraits, Instance, List, Type, observe, observe_compat -from zmq.eventloop.zmqstream import ZMQStream from .comm.comm import BaseComm from .comm.manager import CommManager @@ -29,11 +28,6 @@ from .kernelbase import _accepts_parameters from .zmqshell import ZMQInteractiveShell -try: - from IPython.core.interactiveshell import _asyncio_runner # type:ignore[attr-defined] -except ImportError: - _asyncio_runner = None # type:ignore[assignment] - try: from IPython.core.completer import provisionalcompleter as _provisionalcompleter from IPython.core.completer import rectify_completions as _rectify_completions @@ -81,7 +75,9 @@ class IPythonKernel(KernelBase): help="Set this flag to False to deactivate the use of experimental IPython completion APIs.", ).tag(config=True) - debugpy_stream = Instance(ZMQStream, allow_none=True) if _is_debugpy_available else None + debugpy_socket = ( + Instance(zmq.asyncio.Socket, allow_none=True) if _is_debugpy_available else None + ) user_module = Any() @@ -109,11 +105,13 @@ def __init__(self, **kwargs): """Initialize the kernel.""" super().__init__(**kwargs) + self.executing_blocking_code_in_main_shell = False + # Initialize the Debugger if _is_debugpy_available: self.debugger = Debugger( self.log, - self.debugpy_stream, + self.debugpy_socket, self._publish_debug_event, self.debug_shell_socket, self.session, @@ -208,12 +206,31 @@ def __init__(self, **kwargs): "file_extension": ".py", } - def dispatch_debugpy(self, msg): - if _is_debugpy_available: - # The first frame is the socket id, we can drop it - frame = msg[1].bytes.decode("utf-8") - self.log.debug("Debugpy received: %s", frame) - self.debugger.tcp_client.receive_dap_frame(frame) + async def process_debugpy(self): + async with create_task_group() as tg: + tg.start_soon(self.receive_debugpy_messages) + tg.start_soon(self.poll_stopped_queue) + await to_thread.run_sync(self.debugpy_stop.wait) + tg.cancel_scope.cancel() + + async def receive_debugpy_messages(self): + if not 
_is_debugpy_available: + return + + while True: + await self.receive_debugpy_message() + + async def receive_debugpy_message(self, msg=None): + if not _is_debugpy_available: + return + + if msg is None: + assert self.debugpy_socket is not None + msg = await self.debugpy_socket.recv_multipart() + # The first frame is the socket id, we can drop it + frame = msg[1].decode("utf-8") + self.log.debug("Debugpy received: %s", frame) + self.debugger.tcp_client.receive_dap_frame(frame) @property def banner(self): @@ -226,19 +243,21 @@ async def poll_stopped_queue(self): while True: await self.debugger.handle_stopped_event() - def start(self): + async def start(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None: """Start the kernel.""" if self.shell: self.shell.exit_now = False - if self.debugpy_stream is None: - self.log.warning("debugpy_stream undefined, debugging will not be enabled") + if self.debugpy_socket is None: + self.log.warning("debugpy_socket undefined, debugging will not be enabled") else: - self.debugpy_stream.on_recv(self.dispatch_debugpy, copy=False) - super().start() - if self.debugpy_stream: - asyncio.run_coroutine_threadsafe( - self.poll_stopped_queue(), self.control_thread.io_loop.asyncio_loop - ) + self.debugpy_stop = threading.Event() + self.control_tasks.append(self.process_debugpy) + await super().start(task_status=task_status) + + def stop(self): + super().stop() + if self.debugpy_socket is not None: + self.debugpy_stop.set() def set_parent(self, ident, parent, channel="shell"): """Overridden from parent to tell the display hook and output streams @@ -308,50 +327,6 @@ def execution_count(self, value): # execution counter. pass - @contextmanager - def _cancel_on_sigint(self, future): - """ContextManager for capturing SIGINT and cancelling a future - - SIGINT raises in the event loop when running async code, - but we want it to halt a coroutine. - - Ideally, it would raise KeyboardInterrupt, - but this turns it into a CancelledError. 
- At least it gets a decent traceback to the user. - """ - sigint_future: asyncio.Future[int] = asyncio.Future() - - # whichever future finishes first, - # cancel the other one - def cancel_unless_done(f, _ignored): - if f.cancelled() or f.done(): - return - f.cancel() - - # when sigint finishes, - # abort the coroutine with CancelledError - sigint_future.add_done_callback(partial(cancel_unless_done, future)) - # when the main future finishes, - # stop watching for SIGINT events - future.add_done_callback(partial(cancel_unless_done, sigint_future)) - - def handle_sigint(*args): - def set_sigint_result(): - if sigint_future.cancelled() or sigint_future.done(): - return - sigint_future.set_result(1) - - # use add_callback for thread safety - self.io_loop.add_callback(set_sigint_result) - - # set the custom sigint handler during this context - save_sigint = signal.signal(signal.SIGINT, handle_sigint) - try: - yield - finally: - # restore the previous sigint handler - signal.signal(signal.SIGINT, save_sigint) - async def execute_request(self, stream, ident, parent): """Override for cell output - cell reconciliation.""" parent_header = extract_header(parent) @@ -379,7 +354,7 @@ async def do_execute( if hasattr(shell, "run_cell_async") and hasattr(shell, "should_run_async"): run_cell = shell.run_cell_async should_run_async = shell.should_run_async - accepts_params = _accepts_parameters(run_cell, ["cell_id"]) + with_cell_id = _accepts_parameters(run_cell, ["cell_id"]) else: should_run_async = lambda cell: False # noqa: ARG005, E731 # older IPython, @@ -388,7 +363,7 @@ async def do_execute( async def run_cell(*args, **kwargs): return shell.run_cell(*args, **kwargs) - accepts_params = _accepts_parameters(shell.run_cell, ["cell_id"]) + with_cell_id = _accepts_parameters(shell.run_cell, ["cell_id"]) try: # default case: runner is asyncio and asyncio is already running # TODO: this should check every case for "are we inside the runner", @@ -400,63 +375,70 @@ async def 
run_cell(*args, **kwargs): transformed_cell = code preprocessing_exc_tuple = sys.exc_info() - if ( - _asyncio_runner # type:ignore[truthy-bool] - and shell.loop_runner is _asyncio_runner - and asyncio.get_event_loop().is_running() - and should_run_async( - code, + kwargs = dict( + store_history=store_history, + silent=silent, + ) + if with_cell_id: + kwargs.update(cell_id=cell_id) + + if should_run_async( + code, + transformed_cell=transformed_cell, + preprocessing_exc_tuple=preprocessing_exc_tuple, + ): + kwargs.update( transformed_cell=transformed_cell, preprocessing_exc_tuple=preprocessing_exc_tuple, ) - ): - if accepts_params["cell_id"]: - coro = run_cell( - code, - store_history=store_history, - silent=silent, - transformed_cell=transformed_cell, - preprocessing_exc_tuple=preprocessing_exc_tuple, - cell_id=cell_id, - ) - else: - coro = run_cell( - code, - store_history=store_history, - silent=silent, - transformed_cell=transformed_cell, - preprocessing_exc_tuple=preprocessing_exc_tuple, - ) - coro_future = asyncio.ensure_future(coro) + coro = run_cell(code, **kwargs) + + @dataclass + class Execution: + interrupt: bool = False + result: t.Any = None + + async def run(execution: Execution) -> None: + execution.result = await coro + if not execution.interrupt: + self.shell_interrupt.put(False) + + res = None + try: + async with create_task_group() as tg: + execution = Execution() + self.shell_is_awaiting = True + tg.start_soon(run, execution) + execution.interrupt = await to_thread.run_sync(self.shell_interrupt.get) + self.shell_is_awaiting = False + if execution.interrupt: + tg.cancel_scope.cancel() + + res = execution.result + finally: + shell.events.trigger("post_execute") + if not silent: + shell.events.trigger("post_run_cell", res) - with self._cancel_on_sigint(coro_future): - res = None - try: - res = await coro_future - finally: - shell.events.trigger("post_execute") - if not silent: - shell.events.trigger("post_run_cell", res) else: # runner isn't already 
running, # make synchronous call, # letting shell dispatch to loop runners - if accepts_params["cell_id"]: - res = shell.run_cell( - code, - store_history=store_history, - silent=silent, - cell_id=cell_id, - ) - else: - res = shell.run_cell(code, store_history=store_history, silent=silent) + self.shell_is_blocking = True + try: + res = shell.run_cell(code, **kwargs) + finally: + self.shell_is_blocking = False finally: self._restore_input() - err = res.error_before_exec if res.error_before_exec is not None else res.error_in_exec + if res is not None: + err = res.error_before_exec if res.error_before_exec is not None else res.error_in_exec + else: + err = KeyboardInterrupt() - if res.success: + if res is not None and res.success: reply_content["status"] = "ok" else: reply_content["status"] = "error" diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 097b65aa9..98b08b845 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -18,6 +18,8 @@ from pathlib import Path import zmq +import zmq.asyncio +from anyio import create_task_group, run from IPython.core.application import ( # type:ignore[attr-defined] BaseIPythonApplication, base_aliases, @@ -29,7 +31,6 @@ from jupyter_client.connect import ConnectionFileMixin from jupyter_client.session import Session, session_aliases, session_flags from jupyter_core.paths import jupyter_runtime_dir -from tornado import ioloop from traitlets.traitlets import ( Any, Bool, @@ -43,7 +44,6 @@ ) from traitlets.utils import filefind from traitlets.utils.importstring import import_item -from zmq.eventloop.zmqstream import ZMQStream from .connect import get_connection_info, write_connection_file @@ -323,7 +323,7 @@ def init_sockets(self): """Create a context, a session, and the kernel sockets.""" self.log.info("Starting the kernel at pid: %i", os.getpid()) assert self.context is None, "init_sockets cannot be called twice!" 
- self.context = context = zmq.Context() + self.context = context = zmq.asyncio.Context() atexit.register(self.close) self.shell_socket = context.socket(zmq.ROUTER) @@ -331,7 +331,7 @@ def init_sockets(self): self.shell_port = self._bind_socket(self.shell_socket, self.shell_port) self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port) - self.stdin_socket = context.socket(zmq.ROUTER) + self.stdin_socket = zmq.Context(context).socket(zmq.ROUTER) self.stdin_socket.linger = 1000 self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port) self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port) @@ -540,25 +540,27 @@ def register(signum, file=sys.__stderr__, all_threads=True, chain=False, **kwarg faulthandler.register = register + def sigint_handler(self, *args): + if self.kernel.shell_is_awaiting: + self.kernel.shell_interrupt.put(True) + elif self.kernel.shell_is_blocking: + raise KeyboardInterrupt + def init_signal(self): """Initialize the signal handler.""" - signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGINT, self.sigint_handler) def init_kernel(self): """Create the Kernel object itself""" - shell_stream = ZMQStream(self.shell_socket) - control_stream = ZMQStream(self.control_socket, self.control_thread.io_loop) - debugpy_stream = ZMQStream(self.debugpy_socket, self.control_thread.io_loop) - self.control_thread.start() kernel_factory = self.kernel_class.instance # type:ignore[attr-defined] kernel = kernel_factory( parent=self, session=self.session, - control_stream=control_stream, - debugpy_stream=debugpy_stream, + control_socket=self.control_socket, + debugpy_socket=self.debugpy_socket, debug_shell_socket=self.debug_shell_socket, - shell_stream=shell_stream, + shell_socket=self.shell_socket, control_thread=self.control_thread, iopub_thread=self.iopub_thread, iopub_socket=self.iopub_socket, @@ -717,28 +719,25 @@ def initialize(self, argv=None): sys.stdout.flush() sys.stderr.flush() - def start(self): + def 
start(self) -> None: """Start the application.""" if self.subapp is not None: - return self.subapp.start() + self.subapp.start() if self.poller is not None: self.poller.start() - self.kernel.start() - self.io_loop = ioloop.IOLoop.current() - if self.trio_loop: - from ipykernel.trio_runner import TrioRunner - - tr = TrioRunner() - tr.initialize(self.kernel, self.io_loop) - try: - tr.run() - except KeyboardInterrupt: - pass - else: - try: - self.io_loop.start() - except KeyboardInterrupt: - pass + backend = "trio" if self.trio_loop else "asyncio" + run(self.main, backend=backend) + return + + async def main(self): + async with create_task_group() as tg: + if self.kernel.eventloop: + tg.start_soon(self.kernel.enter_eventloop) + tg.start_soon(self.kernel.start) + + def stop(self): + """Stop the kernel, thread-safe.""" + self.kernel.stop() launch_new_instance = IPKernelApp.launch_instance diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 01539fd22..64a935273 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -9,7 +9,7 @@ import itertools import logging import os -import socket +import queue import sys import threading import time @@ -17,8 +17,7 @@ import uuid import warnings from datetime import datetime -from functools import partial -from signal import SIGINT, SIGTERM, Signals, default_int_handler, signal +from signal import SIGINT, SIGTERM, Signals from .control import CONTROL_THREAD_NAME @@ -37,10 +36,10 @@ import psutil import zmq +from anyio import TASK_STATUS_IGNORED, create_task_group, sleep, to_thread +from anyio.abc import TaskStatus from IPython.core.error import StdinNotImplementedError from jupyter_client.session import Session -from tornado import ioloop -from tornado.queues import Queue, QueueEmpty from traitlets.config.configurable import SingletonConfigurable from traitlets.traitlets import ( Any, @@ -53,9 +52,7 @@ Set, Unicode, default, - observe, ) -from zmq.eventloop.zmqstream import ZMQStream from 
ipykernel.jsonutil import json_clean @@ -80,6 +77,8 @@ def _accepts_parameters(meth, param_names): class Kernel(SingletonConfigurable): """The base kernel class.""" + _aborted_time: float + # --------------------------------------------------------------------------- # Kernel interface # --------------------------------------------------------------------------- @@ -89,57 +88,18 @@ class Kernel(SingletonConfigurable): processes: dict[str, psutil.Process] = {} - @observe("eventloop") - def _update_eventloop(self, change): - """schedule call to eventloop from IOLoop""" - loop = ioloop.IOLoop.current() - if change.new is not None: - loop.add_callback(self.enter_eventloop) - session = Instance(Session, allow_none=True) profile_dir = Instance("IPython.core.profiledir.ProfileDir", allow_none=True) - shell_stream = Instance(ZMQStream, allow_none=True) - - shell_streams: List[t.Any] = List( - help="""Deprecated shell_streams alias. Use shell_stream - - .. versionchanged:: 6.0 - shell_streams is deprecated. Use shell_stream. - """ - ) + shell_socket = Instance(zmq.asyncio.Socket, allow_none=True) implementation: str implementation_version: str banner: str - @default("shell_streams") - def _shell_streams_default(self): # pragma: no cover - warnings.warn( - "Kernel.shell_streams is deprecated in ipykernel 6.0. Use Kernel.shell_stream", - DeprecationWarning, - stacklevel=2, - ) - if self.shell_stream is not None: - return [self.shell_stream] - return [] - - @observe("shell_streams") - def _shell_streams_changed(self, change): # pragma: no cover - warnings.warn( - "Kernel.shell_streams is deprecated in ipykernel 6.0. Use Kernel.shell_stream", - DeprecationWarning, - stacklevel=2, - ) - if len(change.new) > 1: - warnings.warn( - "Kernel only supports one shell stream. 
Additional streams will be ignored.", - RuntimeWarning, - stacklevel=2, - ) - if change.new: - self.shell_stream = change.new[0] + _is_test = Bool(False) - control_stream = Instance(ZMQStream, allow_none=True) + control_socket = Instance(zmq.asyncio.Socket, allow_none=True) + control_tasks: t.Any = List() debug_shell_socket = Any() @@ -293,18 +253,25 @@ def __init__(self, **kwargs): self.do_execute, ["cell_meta", "cell_id"] ) - async def dispatch_control(self, msg): - # Ensure only one control message is processed at a time - async with asyncio.Lock(): - await self.process_control(msg) + async def process_control(self): + try: + while True: + await self.process_control_message() + except BaseException as e: + print("base exception") + if self.control_stop.is_set(): + return + raise e - async def process_control(self, msg): + async def process_control_message(self, msg=None): """dispatch control requests""" - if not self.session: - return - idents, msg = self.session.feed_identities(msg, copy=False) + assert self.control_socket is not None + assert self.session is not None + msg = msg or await self.control_socket.recv_multipart() + copy = not isinstance(msg[0], zmq.Message) + idents, msg = self.session.feed_identities(msg, copy=copy) try: - msg = self.session.deserialize(msg, content=True, copy=False) + msg = self.session.deserialize(msg, content=True, copy=copy) except Exception: self.log.error("Invalid Control Message", exc_info=True) # noqa: G201 return @@ -323,7 +290,7 @@ async def process_control(self, msg): self.log.error("UNKNOWN CONTROL MESSAGE TYPE: %r", msg_type) else: try: - result = handler(self.control_stream, idents, msg) + result = handler(self.control_socket, idents, msg) if inspect.isawaitable(result): await result except Exception: @@ -332,11 +299,8 @@ async def process_control(self, msg): sys.stdout.flush() sys.stderr.flush() self._publish_status("idle", "control") - # flush to ensure reply is sent - if self.control_stream: - 
self.control_stream.flush(zmq.POLLOUT) - def should_handle(self, stream, msg, idents): + async def should_handle(self, stream, msg, idents): """Check whether a shell-channel message should be handled Allows subclasses to prevent handling of certain messages (e.g. aborted requests). @@ -345,19 +309,82 @@ def should_handle(self, stream, msg, idents): if msg_id in self.aborted: # is it safe to assume a msg_id will not be resubmitted? self.aborted.remove(msg_id) - self._send_abort_reply(stream, msg, idents) + await self._send_abort_reply(stream, msg, idents) return False return True - async def dispatch_shell(self, msg): - """dispatch shell requests""" - if not self.session: + async def enter_eventloop(self): + """enter eventloop""" + self.log.info("Entering eventloop %s", self.eventloop) + # record handle, so we can check when this changes + eventloop = self.eventloop + if eventloop is None: + self.log.info("Exiting as there is no eventloop") return - idents, msg = self.session.feed_identities(msg, copy=False) + async def advance_eventloop(): + # check if eventloop changed: + if self.eventloop is not eventloop: + self.log.info("exiting eventloop %s", eventloop) + return + self.log.debug("Advancing eventloop %s", eventloop) + try: + eventloop(self) + except KeyboardInterrupt: + # Ctrl-C shouldn't crash the kernel + self.log.error("KeyboardInterrupt caught in kernel") + if self.eventloop is eventloop: + # schedule advance again + await schedule_next() + + async def schedule_next(): + """Schedule the next advance of the eventloop""" + # flush the eventloop every so often, + # giving us a chance to handle messages in the meantime + self.log.debug("Scheduling eventloop advance") + await sleep(0.001) + await advance_eventloop() + + # begin polling the eventloop + await schedule_next() + + _message_counter = Any( + help="""Monotonic counter of messages + """, + ) + + @default("_message_counter") + def _message_counter_default(self): + return itertools.count() + + async def 
shell_main(self): + async with create_task_group() as tg: + tg.start_soon(self.process_shell) + await to_thread.run_sync(self.shell_stop.wait) + tg.cancel_scope.cancel() + + async def process_shell(self): try: - msg = self.session.deserialize(msg, content=True, copy=False) - except Exception: + while True: + await self.process_shell_message() + except BaseException as e: + if self.shell_stop.is_set(): + return + raise e + + async def process_shell_message(self, msg=None): + assert self.shell_socket is not None + assert self.session is not None + + no_msg = msg is None if self._is_test else not await self.shell_socket.poll(0) + + msg = msg or await self.shell_socket.recv_multipart() + received_time = time.monotonic() + copy = not isinstance(msg[0], zmq.Message) + idents, msg = self.session.feed_identities(msg, copy=copy) + try: + msg = self.session.deserialize(msg, content=True, copy=copy) + except BaseException: self.log.error("Invalid Message", exc_info=True) # noqa: G201 return @@ -369,13 +396,15 @@ async def dispatch_shell(self, msg): # Only abort execute requests if self._aborting and msg_type == "execute_request": - self._send_abort_reply(self.shell_stream, msg, idents) - self._publish_status("idle", "shell") - # flush to ensure reply is sent before - # handling the next request - if self.shell_stream: - self.shell_stream.flush(zmq.POLLOUT) - return + if not self.stop_on_error_timeout: + if no_msg: + self._aborting = False + elif received_time - self._aborted_time > self.stop_on_error_timeout: + self._aborting = False + if self._aborting: + await self._send_abort_reply(self.shell_socket, msg, idents) + self._publish_status("idle", "shell") + return # Print some info about this message and leave a '--->' marker, so it's # easier to trace visually the message chain when debugging. 
Each @@ -383,10 +412,10 @@ async def dispatch_shell(self, msg): self.log.debug("\n*** MESSAGE TYPE:%s***", msg_type) self.log.debug(" Content: %s\n --->\n ", msg["content"]) - if not self.should_handle(self.shell_stream, msg, idents): + if not await self.should_handle(self.shell_socket, msg, idents): return - handler = self.shell_handlers.get(msg_type, None) + handler = self.shell_handlers.get(msg_type) if handler is None: self.log.warning("Unknown message type: %r", msg_type) else: @@ -396,7 +425,7 @@ async def dispatch_shell(self, msg): except Exception: self.log.debug("Unable to signal in pre_handler_hook:", exc_info=True) try: - result = handler(self.shell_stream, idents, msg) + result = handler(self.shell_socket, idents, msg) if inspect.isawaitable(result): await result except Exception: @@ -413,159 +442,43 @@ async def dispatch_shell(self, msg): sys.stdout.flush() sys.stderr.flush() self._publish_status("idle", "shell") - # flush to ensure reply is sent before - # handling the next request - if self.shell_stream: - self.shell_stream.flush(zmq.POLLOUT) + + async def control_main(self): + async with create_task_group() as tg: + for task in self.control_tasks: + tg.start_soon(task) + tg.start_soon(self.process_control) + await to_thread.run_sync(self.control_stop.wait) + tg.cancel_scope.cancel() def pre_handler_hook(self): """Hook to execute before calling message handler""" # ensure default_int_handler during handler call - self.saved_sigint_handler = signal(SIGINT, default_int_handler) def post_handler_hook(self): """Hook to execute after calling message handler""" - signal(SIGINT, self.saved_sigint_handler) - - def enter_eventloop(self): - """enter eventloop""" - self.log.info("Entering eventloop %s", self.eventloop) - # record handle, so we can check when this changes - eventloop = self.eventloop - if eventloop is None: - self.log.info("Exiting as there is no eventloop") - return - - async def advance_eventloop(): - # check if eventloop changed: - if 
self.eventloop is not eventloop: - self.log.info("exiting eventloop %s", eventloop) - return - if self.msg_queue.qsize(): - self.log.debug("Delaying eventloop due to waiting messages") - # still messages to process, make the eventloop wait - schedule_next() - return - self.log.debug("Advancing eventloop %s", eventloop) - try: - eventloop(self) - except KeyboardInterrupt: - # Ctrl-C shouldn't crash the kernel - self.log.error("KeyboardInterrupt caught in kernel") - if self.eventloop is eventloop: - # schedule advance again - schedule_next() - - def schedule_next(): - """Schedule the next advance of the eventloop""" - # call_later allows the io_loop to process other events if needed. - # Going through schedule_dispatch ensures all other dispatches on msg_queue - # are processed before we enter the eventloop, even if the previous dispatch was - # already consumed from the queue by process_one and the queue is - # technically empty. - self.log.debug("Scheduling eventloop advance") - self.io_loop.call_later(0.001, partial(self.schedule_dispatch, advance_eventloop)) - - # begin polling the eventloop - schedule_next() - - async def do_one_iteration(self): - """Process a single shell message - - Any pending control messages will be flushed as well - - .. versionchanged:: 5 - This is now a coroutine - """ - # flush messages off of shell stream into the message queue - if self.shell_stream: - self.shell_stream.flush() - # process at most one shell message per iteration - await self.process_one(wait=False) - async def process_one(self, wait=True): - """Process one request - - Returns None if no message was handled. - """ - if wait: - t, dispatch, args = await self.msg_queue.get() - else: - try: - t, dispatch, args = self.msg_queue.get_nowait() - except (asyncio.QueueEmpty, QueueEmpty): - return - - if self.control_thread is None and self.control_stream is not None: - # If there isn't a separate control thread then this main thread handles both shell - # and control messages. 
Before processing a shell message we need to flush all control - # messages and allow them all to be processed. - await asyncio.sleep(0) - self.control_stream.flush() - - socket = self.control_stream.socket - while socket.poll(1): - await asyncio.sleep(0) - self.control_stream.flush() - - await dispatch(*args) - - async def dispatch_queue(self): - """Coroutine to preserve order of message handling - - Ensures that only one message is processing at a time, - even when the handler is async - """ - - while True: - try: - await self.process_one() - except Exception: - self.log.exception("Error in message handler") - - _message_counter = Any( - help="""Monotonic counter of messages - """, - ) + async def start(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None: + """Process messages on shell and control channels""" + async with create_task_group() as tg: + self.control_stop = threading.Event() + if not self._is_test and self.control_socket is not None: + if self.control_thread: + self.control_thread.set_task(self.control_main) + self.control_thread.start() + else: + tg.start_soon(self.control_main) - @default("_message_counter") - def _message_counter_default(self): - return itertools.count() + self.shell_interrupt: queue.Queue[bool] = queue.Queue() + self.shell_is_awaiting = False + self.shell_is_blocking = False + self.shell_stop = threading.Event() + if not self._is_test and self.shell_socket is not None: + tg.start_soon(self.shell_main) - def schedule_dispatch(self, dispatch, *args): - """schedule a message for dispatch""" - idx = next(self._message_counter) - - self.msg_queue.put_nowait( - ( - idx, - dispatch, - args, - ) - ) - # ensure the eventloop wakes up - self.io_loop.add_callback(lambda: None) - - def start(self): - """register dispatchers for streams""" - self.io_loop = ioloop.IOLoop.current() - self.msg_queue: Queue[t.Any] = Queue() - self.io_loop.add_callback(self.dispatch_queue) - - if self.control_stream: - 
self.control_stream.on_recv(self.dispatch_control, copy=False) - - if self.shell_stream: - self.shell_stream.on_recv( - partial( - self.schedule_dispatch, - self.dispatch_shell, - ), - copy=False, - ) - - # publish idle status - self._publish_status("starting", "shell") + def stop(self): + self.shell_stop.set() + self.control_stop.set() def record_ports(self, ports): """Record the ports that this kernel is using. @@ -653,7 +566,7 @@ def get_parent(self, channel=None): def send_response( self, - stream, + socket, msg_or_type, content=None, ident=None, @@ -674,7 +587,7 @@ def send_response( if not self.session: return None return self.session.send( - stream, + socket, msg_or_type, content, self.get_parent(channel), @@ -703,7 +616,7 @@ def finish_metadata(self, parent, metadata, reply_content): """ return metadata - async def execute_request(self, stream, ident, parent): + async def execute_request(self, socket, ident, parent): """handle an execute_request""" if not self.session: return @@ -764,8 +677,8 @@ async def execute_request(self, stream, ident, parent): reply_content = json_clean(reply_content) metadata = self.finish_metadata(parent, metadata, reply_content) - reply_msg: dict[str, t.Any] = self.session.send( # type:ignore[assignment] - stream, + reply_msg = self.session.send( + socket, "execute_reply", reply_content, parent, @@ -775,8 +688,13 @@ async def execute_request(self, stream, ident, parent): self.log.debug("%s", reply_msg) + assert reply_msg is not None if not silent and reply_msg["content"]["status"] == "error" and stop_on_error: - self._abort_queues() + # while this flag is true, + # execute requests will be aborted + self._aborting = True + self._aborted_time = time.monotonic() + self.log.info("Aborting queue") def do_execute( self, @@ -792,7 +710,7 @@ def do_execute( """Execute user code. 
Must be overridden by subclasses.""" raise NotImplementedError - async def complete_request(self, stream, ident, parent): + async def complete_request(self, socket, ident, parent): """Handle a completion request.""" if not self.session: return @@ -805,7 +723,7 @@ async def complete_request(self, stream, ident, parent): matches = await matches matches = json_clean(matches) - self.session.send(stream, "complete_reply", matches, parent, ident) + self.session.send(socket, "complete_reply", matches, parent, ident) def do_complete(self, code, cursor_pos): """Override in subclasses to find completions.""" @@ -817,7 +735,7 @@ def do_complete(self, code, cursor_pos): "status": "ok", } - async def inspect_request(self, stream, ident, parent): + async def inspect_request(self, socket, ident, parent): """Handle an inspect request.""" if not self.session: return @@ -834,14 +752,14 @@ async def inspect_request(self, stream, ident, parent): # Before we send this object over, we scrub it for JSON usage reply_content = json_clean(reply_content) - msg = self.session.send(stream, "inspect_reply", reply_content, parent, ident) + msg = self.session.send(socket, "inspect_reply", reply_content, parent, ident) self.log.debug("%s", msg) def do_inspect(self, code, cursor_pos, detail_level=0, omit_sections=()): """Override in subclasses to allow introspection.""" return {"status": "ok", "data": {}, "metadata": {}, "found": False} - async def history_request(self, stream, ident, parent): + async def history_request(self, socket, ident, parent): """Handle a history request.""" if not self.session: return @@ -852,7 +770,7 @@ async def history_request(self, stream, ident, parent): reply_content = await reply_content reply_content = json_clean(reply_content) - msg = self.session.send(stream, "history_reply", reply_content, parent, ident) + msg = self.session.send(socket, "history_reply", reply_content, parent, ident) self.log.debug("%s", msg) def do_history( @@ -870,13 +788,13 @@ def do_history( 
"""Override in subclasses to access history.""" return {"status": "ok", "history": []} - async def connect_request(self, stream, ident, parent): + async def connect_request(self, socket, ident, parent): """Handle a connect request.""" if not self.session: return content = self._recorded_ports.copy() if self._recorded_ports else {} content["status"] = "ok" - msg = self.session.send(stream, "connect_reply", content, parent, ident) + msg = self.session.send(socket, "connect_reply", content, parent, ident) self.log.debug("%s", msg) @property @@ -890,16 +808,16 @@ def kernel_info(self): "help_links": self.help_links, } - async def kernel_info_request(self, stream, ident, parent): + async def kernel_info_request(self, socket, ident, parent): """Handle a kernel info request.""" if not self.session: return content = {"status": "ok"} content.update(self.kernel_info) - msg = self.session.send(stream, "kernel_info_reply", content, parent, ident) + msg = self.session.send(socket, "kernel_info_reply", content, parent, ident) self.log.debug("%s", msg) - async def comm_info_request(self, stream, ident, parent): + async def comm_info_request(self, socket, ident, parent): """Handle a comm info request.""" if not self.session: return @@ -916,7 +834,7 @@ async def comm_info_request(self, stream, ident, parent): else: comms = {} reply_content = dict(comms=comms, status="ok") - msg = self.session.send(stream, "comm_info_reply", reply_content, parent, ident) + msg = self.session.send(socket, "comm_info_reply", reply_content, parent, ident) self.log.debug("%s", msg) def _send_interrupt_children(self): @@ -936,7 +854,7 @@ def _send_interrupt_children(self): else: os.kill(pid, SIGINT) - async def interrupt_request(self, stream, ident, parent): + async def interrupt_request(self, socket, ident, parent): """Handle an interrupt request.""" if not self.session: return @@ -953,31 +871,23 @@ async def interrupt_request(self, stream, ident, parent): "evalue": str(err), } - 
self.session.send(stream, "interrupt_reply", content, parent, ident=ident) + self.session.send(socket, "interrupt_reply", content, parent, ident=ident) return - async def shutdown_request(self, stream, ident, parent): + async def shutdown_request(self, socket, ident, parent): """Handle a shutdown request.""" if not self.session: return content = self.do_shutdown(parent["content"]["restart"]) if inspect.isawaitable(content): content = await content - self.session.send(stream, "shutdown_reply", content, parent, ident=ident) + self.session.send(socket, "shutdown_reply", content, parent, ident=ident) # same content, but different msg_id for broadcasting on IOPub self._shutdown_message = self.session.msg("shutdown_reply", content, parent) await self._at_shutdown() - self.log.debug("Stopping control ioloop") - if self.control_stream: - control_io_loop = self.control_stream.io_loop - control_io_loop.add_callback(control_io_loop.stop) - - self.log.debug("Stopping shell ioloop") - if self.shell_stream: - shell_io_loop = self.shell_stream.io_loop - shell_io_loop.add_callback(shell_io_loop.stop) + self.stop() def do_shutdown(self, restart): """Override in subclasses to do things when the frontend shuts down the @@ -985,7 +895,7 @@ def do_shutdown(self, restart): """ return {"status": "ok", "restart": restart} - async def is_complete_request(self, stream, ident, parent): + async def is_complete_request(self, socket, ident, parent): """Handle an is_complete request.""" if not self.session: return @@ -996,14 +906,14 @@ async def is_complete_request(self, stream, ident, parent): if inspect.isawaitable(reply_content): reply_content = await reply_content reply_content = json_clean(reply_content) - reply_msg = self.session.send(stream, "is_complete_reply", reply_content, parent, ident) + reply_msg = self.session.send(socket, "is_complete_reply", reply_content, parent, ident) self.log.debug("%s", reply_msg) def do_is_complete(self, code): """Override in subclasses to find 
completions.""" return {"status": "unknown"} - async def debug_request(self, stream, ident, parent): + async def debug_request(self, socket, ident, parent): """Handle a debug request.""" if not self.session: return @@ -1012,7 +922,7 @@ async def debug_request(self, stream, ident, parent): if inspect.isawaitable(reply_content): reply_content = await reply_content reply_content = json_clean(reply_content) - reply_msg = self.session.send(stream, "debug_reply", reply_content, parent, ident) + reply_msg = self.session.send(socket, "debug_reply", reply_content, parent, ident) self.log.debug("%s", reply_msg) def get_process_metric_value(self, process, name, attribute=None): @@ -1028,7 +938,7 @@ def get_process_metric_value(self, process, name, attribute=None): except BaseException: return 0 - async def usage_request(self, stream, ident, parent): + async def usage_request(self, socket, ident, parent): """Handle a usage request.""" if not self.session: return @@ -1061,7 +971,7 @@ async def usage_request(self, stream, ident, parent): reply_content["host_cpu_percent"] = cpu_percent reply_content["cpu_count"] = psutil.cpu_count(logical=True) reply_content["host_virtual_memory"] = dict(psutil.virtual_memory()._asdict()) - reply_msg = self.session.send(stream, "usage_reply", reply_content, parent, ident) + reply_msg = self.session.send(socket, "usage_reply", reply_content, parent, ident) self.log.debug("%s", reply_msg) async def do_debug_request(self, msg): @@ -1071,7 +981,7 @@ async def do_debug_request(self, msg): # Engine methods (DEPRECATED) # --------------------------------------------------------------------------- - async def apply_request(self, stream, ident, parent): # pragma: no cover + async def apply_request(self, socket, ident, parent): # pragma: no cover """Handle an apply request.""" self.log.warning("apply_request is deprecated in kernel_base, moving to ipyparallel.") try: @@ -1094,7 +1004,7 @@ async def apply_request(self, stream, ident, parent): # pragma: no 
cover if not self.session: return self.session.send( - stream, + socket, "apply_reply", reply_content, parent=parent, @@ -1111,7 +1021,7 @@ def do_apply(self, content, bufs, msg_id, reply_metadata): # Control messages (DEPRECATED) # --------------------------------------------------------------------------- - async def abort_request(self, stream, ident, parent): # pragma: no cover + async def abort_request(self, socket, ident, parent): # pragma: no cover """abort a specific msg by id""" self.log.warning( "abort_request is deprecated in kernel_base. It is only part of IPython parallel" @@ -1119,8 +1029,6 @@ async def abort_request(self, stream, ident, parent): # pragma: no cover msg_ids = parent["content"].get("msg_ids", None) if isinstance(msg_ids, str): msg_ids = [msg_ids] - if not msg_ids: - self._abort_queues() for mid in msg_ids: self.aborted.add(str(mid)) @@ -1128,18 +1036,18 @@ async def abort_request(self, stream, ident, parent): # pragma: no cover if not self.session: return reply_msg = self.session.send( - stream, "abort_reply", content=content, parent=parent, ident=ident + socket, "abort_reply", content=content, parent=parent, ident=ident ) self.log.debug("%s", reply_msg) - async def clear_request(self, stream, idents, parent): # pragma: no cover + async def clear_request(self, socket, idents, parent): # pragma: no cover """Clear our namespace.""" self.log.warning( "clear_request is deprecated in kernel_base. 
It is only part of IPython parallel" ) content = self.do_clear() if self.session: - self.session.send(stream, "clear_reply", ident=idents, parent=parent, content=content) + self.session.send(socket, "clear_reply", ident=idents, parent=parent, content=content) def do_clear(self): """DEPRECATED since 4.0.3""" @@ -1157,42 +1065,7 @@ def _topic(self, topic): _aborting = Bool(False) - def _abort_queues(self): - # while this flag is true, - # execute requests will be aborted - self._aborting = True - self.log.info("Aborting queue") - - # flush streams, so all currently waiting messages - # are added to the queue - if self.shell_stream: - self.shell_stream.flush() - - # Callback to signal that we are done aborting - # dispatch functions _must_ be async - async def stop_aborting(): - self.log.info("Finishing abort") - self._aborting = False - - # put the stop-aborting event on the message queue - # so that all messages already waiting in the queue are aborted - # before we reset the flag - schedule_stop_aborting = partial(self.schedule_dispatch, stop_aborting) - - if self.stop_on_error_timeout: - # if we have a delay, give messages this long to arrive on the queue - # before we stop aborting requests - self.io_loop.call_later(self.stop_on_error_timeout, schedule_stop_aborting) - # If we have an eventloop, it may interfere with the call_later above. - # If the loop has a _schedule_exit method, we call that so the loop exits - # after stop_on_error_timeout, returning to the main io_loop and letting - # the call_later fire. 
- if self.eventloop is not None and hasattr(self.eventloop, "_schedule_exit"): - self.eventloop._schedule_exit(self.stop_on_error_timeout + 0.01) - else: - schedule_stop_aborting() - - def _send_abort_reply(self, stream, msg, idents): + async def _send_abort_reply(self, socket, msg, idents): """Send a reply to an aborted request""" if not self.session: return @@ -1203,8 +1076,9 @@ def _send_abort_reply(self, stream, msg, idents): md = self.finish_metadata(msg, md, status) md.update(status) + assert self.session is not None self.session.send( - stream, + socket, reply_type, metadata=md, content=status, @@ -1389,5 +1263,3 @@ async def _at_shutdown(self): ident=self._topic("shutdown"), ) self.log.debug("%s", self._shutdown_message) - if self.control_stream: - self.control_stream.flush(zmq.POLLOUT) diff --git a/ipykernel/zmqshell.py b/ipykernel/zmqshell.py index 4fa850735..bc99d000d 100644 --- a/ipykernel/zmqshell.py +++ b/ipykernel/zmqshell.py @@ -553,9 +553,15 @@ def _showtraceback(self, etype, evalue, stb): sys.stdout.flush() sys.stderr.flush() + # For Keyboard interrupt, remove the kernel source code from the + # traceback. 
+ ename = str(etype.__name__) + if ename == "KeyboardInterrupt": + stb.pop(-2) + exc_content = { "traceback": stb, - "ename": str(etype.__name__), + "ename": ename, "evalue": str(evalue), } @@ -612,7 +618,8 @@ def init_magics(self): """Initialize magics.""" super().init_magics() self.register_magics(KernelMagics) - self.magics_manager.register_alias("ed", "edit") + if self.magics_manager: + self.magics_manager.register_alias("ed", "edit") def init_virtualenv(self): """Initialize virtual environment.""" diff --git a/pyproject.toml b/pyproject.toml index 3f91deb2e..17093225d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -20,7 +20,7 @@ classifiers = [ ] requires-python = ">=3.8" dependencies = [ - "debugpy>=1.6.5", + "debugpy>=1.8.1", "ipython>=7.23.1", "comm>=0.1.1", "traitlets>=5.4.0", @@ -28,12 +28,12 @@ dependencies = [ "jupyter_core>=4.12,!=5.0.*", # For tk event loop support only. "nest_asyncio", - "tornado>=6.2", "matplotlib-inline>=0.1", 'appnope;platform_system=="Darwin"', - "pyzmq>=25", + "pyzmq>=25.0", "psutil", "packaging", + "anyio>=4.0.0", ] [project.urls] @@ -59,8 +59,9 @@ test = [ "flaky", "ipyparallel", "pre-commit", + "pytest-timeout", + "trio", "pytest-asyncio>=0.23.5", - "pytest-timeout" ] cov = [ "coverage[toml]", @@ -155,12 +156,12 @@ addopts = [ ] testpaths = [ "tests", - "tests/inprocess" + # "tests/inprocess" ] -asyncio_mode = "auto" -timeout = 300 +norecursedirs = "tests/inprocess" +timeout = 60 # Restore this setting to debug failures -#timeout_method = "thread" +# timeout_method = "thread" filterwarnings= [ # Fail on warnings "error", @@ -176,8 +177,9 @@ filterwarnings= [ "ignore:unclosed TIMEOUT: + raise TimeoutError() KM.interrupt_kernel() reply = KC.get_shell_msg()["content"] diff --git a/tests/test_embed_kernel.py b/tests/test_embed_kernel.py index ff97edfa5..685824071 100644 --- a/tests/test_embed_kernel.py +++ b/tests/test_embed_kernel.py @@ -206,7 +206,7 @@ def test_embed_kernel_func(): def trigger_stop(): time.sleep(1) app = 
IPKernelApp.instance() - app.io_loop.add_callback(app.io_loop.stop) + app.stop() IPKernelApp.clear_instance() thread = threading.Thread(target=trigger_stop) diff --git a/tests/test_eventloop.py b/tests/test_eventloop.py index ee9a68fca..34581b7fb 100644 --- a/tests/test_eventloop.py +++ b/tests/test_eventloop.py @@ -108,7 +108,7 @@ def do_thing(): @windows_skip def test_asyncio_loop(kernel): def do_thing(): - loop.call_soon(loop.stop) + loop.call_later(0.01, loop.stop) loop = asyncio.get_event_loop() loop.call_soon(do_thing) diff --git a/tests/test_io.py b/tests/test_io.py index 0e23b4b14..e49bc2769 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -12,6 +12,7 @@ import pytest import zmq +import zmq.asyncio from jupyter_client.session import Session from ipykernel.iostream import MASTER, BackgroundSocket, IOPubThread, OutStream @@ -19,7 +20,7 @@ @pytest.fixture() def ctx(): - ctx = zmq.Context() + ctx = zmq.asyncio.Context() yield ctx ctx.destroy() @@ -64,23 +65,23 @@ def test_io_isatty(iopub_thread): assert stream.isatty() -def test_io_thread(iopub_thread): +async def test_io_thread(anyio_backend, iopub_thread): thread = iopub_thread thread._setup_pipe_in() msg = [thread._pipe_uuid, b"a"] - thread._handle_pipe_msg(msg) + await thread._handle_pipe_msg(msg) ctx1, pipe = thread._setup_pipe_out() pipe.close() - thread._pipe_in.close() + thread._pipe_in1.close() thread._check_mp_mode = lambda: MASTER thread._really_send([b"hi"]) ctx1.destroy() - thread.close() + thread.stop() thread.close() thread._really_send(None) -def test_background_socket(iopub_thread): +async def test_background_socket(anyio_backend, iopub_thread): sock = BackgroundSocket(iopub_thread) assert sock.__class__ == BackgroundSocket with warnings.catch_warnings(): @@ -91,9 +92,10 @@ def test_background_socket(iopub_thread): sock.send(b"hi") -def test_outstream(iopub_thread): +async def test_outstream(anyio_backend, iopub_thread): session = Session() pub = iopub_thread.socket + with 
warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) stream = OutStream(session, pub, "stdout") @@ -116,6 +118,7 @@ def test_outstream(iopub_thread): assert stream.writable() +@pytest.mark.anyio() async def test_event_pipe_gc(iopub_thread): session = Session(key=b"abc") stream = OutStream( @@ -129,23 +132,22 @@ async def test_event_pipe_gc(iopub_thread): with stream, mock.patch.object(sys, "stdout", stream), ThreadPoolExecutor(1) as pool: pool.submit(print, "x").result() pool_thread = pool.submit(threading.current_thread).result() - assert list(iopub_thread._event_pipes) == [pool_thread] + threads = list(iopub_thread._event_pipes) + assert threads[0] == pool_thread # run gc once in the iopub thread f: Future = Future() - async def run_gc(): - try: - await iopub_thread._event_pipe_gc() - except Exception as e: - f.set_exception(e) - else: - f.set_result(None) + try: + await iopub_thread._event_pipe_gc() + except Exception as e: + f.set_exception(e) + else: + f.set_result(None) - iopub_thread.io_loop.add_callback(run_gc) # wait for call to finish in iopub thread f.result() - assert iopub_thread._event_pipes == {} + # assert iopub_thread._event_pipes == {} def subprocess_test_echo_watch(): @@ -153,7 +155,7 @@ def subprocess_test_echo_watch(): session = Session(key=b"abc") # use PUSH socket to avoid subscription issues - with zmq.Context() as ctx, ctx.socket(zmq.PUSH) as pub: + with zmq.asyncio.Context() as ctx, ctx.socket(zmq.PUSH) as pub: pub.connect(os.environ["IOPUB_URL"]) iopub_thread = IOPubThread(pub) iopub_thread.start() @@ -190,8 +192,9 @@ def subprocess_test_echo_watch(): iopub_thread.close() +@pytest.mark.anyio() @pytest.mark.skipif(sys.platform.startswith("win"), reason="Windows") -def test_echo_watch(ctx): +async def test_echo_watch(ctx): """Test echo on underlying FD while capturing the same FD Test runs in a subprocess to avoid messing with pytest output capturing. 
@@ -221,8 +224,10 @@ def test_echo_watch(ctx): print(f"{p.stdout=}") print(f"{p.stderr}=", file=sys.stderr) assert p.returncode == 0 - while s.poll(timeout=100): - ident, msg = session.recv(s) + while await s.poll(timeout=100): + msg = await s.recv_multipart() + ident, msg = session.feed_identities(msg, copy=True) + msg = session.deserialize(msg, content=True, copy=True) assert msg is not None # for type narrowing if msg["header"]["msg_type"] == "stream" and msg["content"]["name"] == "stdout": stdout_chunks.append(msg["content"]["text"]) diff --git a/tests/test_ipkernel_direct.py b/tests/test_ipkernel_direct.py index 037489f34..cea2ec994 100644 --- a/tests/test_ipkernel_direct.py +++ b/tests/test_ipkernel_direct.py @@ -4,7 +4,6 @@ import os import pytest -import zmq from IPython.core.history import DummyDB from ipykernel.comm.comm import BaseComm @@ -149,19 +148,21 @@ async def test_direct_clear(ipkernel): ipkernel.do_clear() +@pytest.mark.skip("ipykernel._cancel_on_sigint doesn't exist anymore") async def test_cancel_on_sigint(ipkernel: IPythonKernel) -> None: future: asyncio.Future = asyncio.Future() - with ipkernel._cancel_on_sigint(future): - pass + # with ipkernel._cancel_on_sigint(future): + # pass future.set_result(None) -def test_dispatch_debugpy(ipkernel: IPythonKernel) -> None: +async def test_dispatch_debugpy(ipkernel: IPythonKernel) -> None: msg = ipkernel.session.msg("debug_request", {}) msg_list = ipkernel.session.serialize(msg) - ipkernel.dispatch_debugpy([zmq.Message(m) for m in msg_list]) + await ipkernel.receive_debugpy_message(msg_list) +@pytest.mark.skip("Queues don't exist anymore") async def test_start(ipkernel: IPythonKernel) -> None: shell_future: asyncio.Future = asyncio.Future() @@ -176,6 +177,7 @@ async def fake_dispatch_queue(): await shell_future +@pytest.mark.skip("Queues don't exist anymore") async def test_start_no_debugpy(ipkernel: IPythonKernel) -> None: shell_future: asyncio.Future = asyncio.Future() diff --git 
a/tests/test_kernel_direct.py b/tests/test_kernel_direct.py index dfb8a70fe..ea3c6fe7e 100644 --- a/tests/test_kernel_direct.py +++ b/tests/test_kernel_direct.py @@ -104,6 +104,7 @@ async def test_direct_debug_request(kernel): assert reply["header"]["msg_type"] == "debug_reply" +@pytest.mark.skip("Shell streams don't exist anymore") async def test_deprecated_features(kernel): with warnings.catch_warnings(): warnings.simplefilter("ignore", DeprecationWarning) @@ -119,33 +120,26 @@ async def test_deprecated_features(kernel): async def test_process_control(kernel): from jupyter_client.session import DELIM - class FakeMsg: - def __init__(self, bytes): - self.bytes = bytes - - await kernel.process_control([FakeMsg(DELIM), 1]) + await kernel.process_control_message([DELIM, 1]) msg = kernel._prep_msg("does_not_exist") - await kernel.process_control(msg) + await kernel.process_control_message(msg) -def test_should_handle(kernel): +async def test_should_handle(kernel): msg = kernel.session.msg("debug_request", {}) kernel.aborted.add(msg["header"]["msg_id"]) - assert not kernel.should_handle(kernel.control_stream, msg, []) + assert not await kernel.should_handle(kernel.control_socket, msg, []) async def test_dispatch_shell(kernel): from jupyter_client.session import DELIM - class FakeMsg: - def __init__(self, bytes): - self.bytes = bytes - - await kernel.dispatch_shell([FakeMsg(DELIM), 1]) + await kernel.process_shell_message([DELIM, 1]) msg = kernel._prep_msg("does_not_exist") - await kernel.dispatch_shell(msg) + await kernel.process_shell_message(msg) +@pytest.mark.skip("kernelbase.do_one_iteration doesn't exist anymore") async def test_do_one_iteration(kernel): kernel.msg_queue = asyncio.Queue() await kernel.do_one_iteration() @@ -156,7 +150,7 @@ async def test_publish_debug_event(kernel): async def test_connect_request(kernel): - await kernel.connect_request(kernel.shell_stream, "foo", {}) + await kernel.connect_request(kernel.shell_socket, b"foo", {}) async def 
test_send_interrupt_children(kernel): diff --git a/tests/test_kernelapp.py b/tests/test_kernelapp.py index da38777d0..6b9f451b9 100644 --- a/tests/test_kernelapp.py +++ b/tests/test_kernelapp.py @@ -2,7 +2,6 @@ import os import threading import time -from unittest.mock import patch import pytest from jupyter_core.paths import secure_write @@ -40,7 +39,7 @@ def test_start_app(): def trigger_stop(): time.sleep(1) - app.io_loop.add_callback(app.io_loop.stop) + app.stop() thread = threading.Thread(target=trigger_stop) thread.start() @@ -121,11 +120,17 @@ def test_merge_connection_file(): @pytest.mark.skipif(trio is None, reason="requires trio") def test_trio_loop(): app = IPKernelApp(trio_loop=True) + + def trigger_stop(): + time.sleep(1) + app.stop() + + thread = threading.Thread(target=trigger_stop) + thread.start() + app.kernel = MockKernel() app.init_sockets() - with patch("ipykernel.trio_runner.TrioRunner.run", lambda _: None): - app.start() + app.start() app.cleanup_connection_file() - app.io_loop.add_callback(app.io_loop.stop) app.kernel.destroy() app.close() diff --git a/tests/test_message_spec.py b/tests/test_message_spec.py index db6ea7d7f..d9d8bb810 100644 --- a/tests/test_message_spec.py +++ b/tests/test_message_spec.py @@ -5,6 +5,7 @@ import re import sys +import time from queue import Empty import pytest @@ -364,7 +365,6 @@ def test_execute_stop_on_error(): KC.execute(code='print("Hello")') KC.execute(code='print("world")') reply = KC.get_shell_msg(timeout=TIMEOUT) - print(reply) reply = KC.get_shell_msg(timeout=TIMEOUT) assert reply["content"]["status"] == "aborted" # second message, too @@ -595,10 +595,17 @@ def test_stream(): msg_id, reply = execute("print('hi')") - stdout = KC.get_iopub_msg(timeout=TIMEOUT) - validate_message(stdout, "stream", msg_id) - content = stdout["content"] - assert content["text"] == "hi\n" + stream = "" + t0 = time.monotonic() + while True: + msg = KC.get_iopub_msg(timeout=TIMEOUT) + validate_message(msg, "stream", msg_id) + 
stream += msg["content"]["text"] + assert "hi\n".startswith(stream) + if stream == "hi\n": + break + if time.monotonic() - t0 > TIMEOUT: + raise TimeoutError() def test_display_data(): diff --git a/tests/test_pickleutil.py b/tests/test_pickleutil.py index c48eadf77..2c55a30e4 100644 --- a/tests/test_pickleutil.py +++ b/tests/test_pickleutil.py @@ -1,10 +1,16 @@ import pickle +import sys import warnings +import pytest + with warnings.catch_warnings(): warnings.simplefilter("ignore") from ipykernel.pickleutil import can, uncan +if sys.platform.startswith("win"): + pytest.skip("skipping pickle tests on windows", allow_module_level=True) + def interactive(f): f.__module__ = "__main__" From 0bc51f2aacc328ea8abff9b4634abd551a850507 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Krassowski?= <5832902+krassowski@users.noreply.github.com> Date: Mon, 25 Mar 2024 16:47:36 +0000 Subject: [PATCH 06/97] Do not import debugger/debugpy unless needed (#1223) --- ipykernel/ipkernel.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/ipykernel/ipkernel.py b/ipykernel/ipkernel.py index 15242933b..dabb0c445 100644 --- a/ipykernel/ipkernel.py +++ b/ipykernel/ipkernel.py @@ -21,7 +21,6 @@ from .comm.comm import BaseComm from .comm.manager import CommManager from .compiler import XCachingCompiler -from .debugger import Debugger, _is_debugpy_available from .eventloops import _use_appnope from .iostream import OutStream from .kernelbase import Kernel as KernelBase @@ -75,9 +74,7 @@ class IPythonKernel(KernelBase): help="Set this flag to False to deactivate the use of experimental IPython completion APIs.", ).tag(config=True) - debugpy_socket = ( - Instance(zmq.asyncio.Socket, allow_none=True) if _is_debugpy_available else None - ) + debugpy_socket = Instance(zmq.asyncio.Socket, allow_none=True) user_module = Any() @@ -107,6 +104,8 @@ def __init__(self, **kwargs): self.executing_blocking_code_in_main_shell = False + from .debugger import Debugger, 
_is_debugpy_available + # Initialize the Debugger if _is_debugpy_available: self.debugger = Debugger( @@ -214,6 +213,8 @@ async def process_debugpy(self): tg.cancel_scope.cancel() async def receive_debugpy_messages(self): + from .debugger import _is_debugpy_available + if not _is_debugpy_available: return @@ -221,6 +222,8 @@ async def receive_debugpy_messages(self): await self.receive_debugpy_message() async def receive_debugpy_message(self, msg=None): + from .debugger import _is_debugpy_available + if not _is_debugpy_available: return @@ -506,6 +509,8 @@ def do_complete(self, code, cursor_pos): async def do_debug_request(self, msg): """Handle a debug request.""" + from .debugger import _is_debugpy_available + if _is_debugpy_available: return await self.debugger.process_request(msg) return None From 8d60e674db5c10738b65ced587aa76242f693dfd Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Wed, 3 Apr 2024 12:56:02 +0100 Subject: [PATCH 07/97] Correctly handle with_cell_id in async do_execute (#1227) --- ipykernel/ipkernel.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipykernel/ipkernel.py b/ipykernel/ipkernel.py index dabb0c445..db83d986f 100644 --- a/ipykernel/ipkernel.py +++ b/ipykernel/ipkernel.py @@ -382,7 +382,7 @@ async def run_cell(*args, **kwargs): store_history=store_history, silent=silent, ) - if with_cell_id: + if with_cell_id and with_cell_id["cell_id"]: kwargs.update(cell_id=cell_id) if should_run_async( From 5534fd99b7c0a936392b35ad20bee026790332bd Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Thu, 4 Apr 2024 09:16:37 -0500 Subject: [PATCH 08/97] Set all min deps (#1229) --- pyproject.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 17093225d..909308af9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -27,12 +27,12 @@ dependencies = [ "jupyter_client>=8.0.0", "jupyter_core>=4.12,!=5.0.*", # For tk event loop support only. 
- "nest_asyncio", + "nest_asyncio>=1.4", "matplotlib-inline>=0.1", - 'appnope;platform_system=="Darwin"', + 'appnope>=0.1.2;platform_system=="Darwin"', "pyzmq>=25.0", - "psutil", - "packaging", + "psutil>=5.7", + "packaging>=22", "anyio>=4.0.0", ] From a7d66ae2197e0d7471ba160542cf5ff7713084b5 Mon Sep 17 00:00:00 2001 From: Steven Silvester Date: Mon, 8 Apr 2024 07:32:26 -0500 Subject: [PATCH 09/97] Add compat with pytest 8 (#1231) --- pyproject.toml | 2 +- tests/__init__.py | 9 +++++---- tests/test_async.py | 7 +++---- tests/test_eventloop.py | 7 +++---- tests/test_message_spec.py | 3 ++- 5 files changed, 14 insertions(+), 14 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 909308af9..cdf265f63 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ docs = [ "trio" ] test = [ - "pytest>=7.0,<8", + "pytest>=7.0,<9", "pytest-cov", "flaky", "ipyparallel", diff --git a/tests/__init__.py b/tests/__init__.py index 013114bd1..ee324a6fa 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -7,6 +7,8 @@ import tempfile from unittest.mock import patch +import pytest + from ipykernel.kernelspec import install pjoin = os.path.join @@ -15,7 +17,8 @@ patchers: list = [] -def setup(): +@pytest.fixture(autouse=True) +def _global_setup(): """setup temporary env for tests""" global tmp tmp = tempfile.mkdtemp() @@ -34,9 +37,7 @@ def setup(): # install IPython in the temp home: install(user=True) - - -def teardown(): + yield for p in patchers: p.stop() diff --git a/tests/test_async.py b/tests/test_async.py index 422673299..a40db4a00 100644 --- a/tests/test_async.py +++ b/tests/test_async.py @@ -11,14 +11,13 @@ KC = KM = None -def setup_function(): +@pytest.fixture(autouse=True) +def _setup_env(): """start the global kernel (if it isn't running) and return its client""" global KM, KC KM, KC = start_new_kernel() flush_channels(KC) - - -def teardown_function(): + yield assert KC is not None assert KM is not None KC.stop_channels() diff --git 
a/tests/test_eventloop.py b/tests/test_eventloop.py index 34581b7fb..77596eedd 100644 --- a/tests/test_eventloop.py +++ b/tests/test_eventloop.py @@ -42,14 +42,13 @@ def _get_qt_vers(): _get_qt_vers() -def setup(): +@pytest.fixture(autouse=True) +def _setup_env(): """start the global kernel (if it isn't running) and return its client""" global KM, KC KM, KC = start_new_kernel() flush_channels(KC) - - -def teardown(): + yield assert KM is not None assert KC is not None KC.stop_channels() diff --git a/tests/test_message_spec.py b/tests/test_message_spec.py index d9d8bb810..d98503ee7 100644 --- a/tests/test_message_spec.py +++ b/tests/test_message_spec.py @@ -22,7 +22,8 @@ KC: BlockingKernelClient = None # type:ignore -def setup(): +@pytest.fixture(autouse=True) +def _setup_env(): global KC KC = start_global_kernel() From 31164673d6f8faea3b806b74dd0ea27b8769ed7b Mon Sep 17 00:00:00 2001 From: Marc Udoff Date: Fri, 28 Jun 2024 09:30:50 -0400 Subject: [PATCH 10/97] Update version to 7.0.0 (#1241) --- ipykernel/_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipykernel/_version.py b/ipykernel/_version.py index 77ccbb0a3..8a7bc44eb 100644 --- a/ipykernel/_version.py +++ b/ipykernel/_version.py @@ -5,7 +5,7 @@ from typing import List # Version string must appear intact for hatch versioning -__version__ = "6.29.3" +__version__ = "7.0.0" # Build up version_info tuple for backwards compatibility pattern = r"(?P\d+).(?P\d+).(?P\d+)(?P.*)" From 5641346aba9abb7d53c09fb1a5cd9016ed3cdf84 Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Mon, 1 Jul 2024 09:14:09 +0100 Subject: [PATCH 11/97] Explicitly close memory object streams (#1253) --- tests/conftest.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/conftest.py b/tests/conftest.py index 61a1ea474..2c2665551 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -63,6 +63,12 @@ def __init__(self, sockets, *args, **kwargs): send_stream, receive_stream = 
create_memory_object_stream(max_buffer_size=inf) self._streams[socket] = {"send": send_stream, "receive": receive_stream} + def close(self): + for streams in self._streams.values(): + for stream in streams.values(): + stream.close() + self._streams.clear() + def send(self, socket, *args, **kwargs): msg = super().send(socket, *args, **kwargs) send_stream: MemoryObjectSendStream[Any] = self._streams[socket]["send"] @@ -102,6 +108,7 @@ async def do_debug_request(self, msg): def destroy(self): self.stop() + self.session.close() for socket in self.test_sockets: socket.close() self.context.destroy() From eb10a0dd4fed3bf611c0ac7dfd2ff2de2721aaac Mon Sep 17 00:00:00 2001 From: Gregory Shklover Date: Mon, 1 Jul 2024 16:36:32 +0300 Subject: [PATCH 12/97] Fixed error accessing sys.stdout/sys.stderr when those are None (#1247) --- ipykernel/inprocess/ipkernel.py | 6 ++++-- ipykernel/kernelbase.py | 30 ++++++++++++++++++++---------- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/ipykernel/inprocess/ipkernel.py b/ipykernel/inprocess/ipkernel.py index 416be5a48..114e231d9 100644 --- a/ipykernel/inprocess/ipkernel.py +++ b/ipykernel/inprocess/ipkernel.py @@ -97,8 +97,10 @@ def _abort_queues(self): def _input_request(self, prompt, ident, parent, password=False): # Flush output before making the request. self.raw_input_str = None - sys.stderr.flush() - sys.stdout.flush() + if sys.stdout is not None: + sys.stdout.flush() + if sys.stderr is not None: + sys.stderr.flush() # Send the input request. 
content = json_clean(dict(prompt=prompt, password=password)) diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 64a935273..e507964b2 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -296,8 +296,10 @@ async def process_control_message(self, msg=None): except Exception: self.log.error("Exception in control handler:", exc_info=True) # noqa: G201 - sys.stdout.flush() - sys.stderr.flush() + if sys.stdout is not None: + sys.stdout.flush() + if sys.stderr is not None: + sys.stderr.flush() self._publish_status("idle", "control") async def should_handle(self, stream, msg, idents): @@ -439,8 +441,10 @@ async def process_shell_message(self, msg=None): except Exception: self.log.debug("Unable to signal in post_handler_hook:", exc_info=True) - sys.stdout.flush() - sys.stderr.flush() + if sys.stdout is not None: + sys.stdout.flush() + if sys.stderr is not None: + sys.stderr.flush() self._publish_status("idle", "shell") async def control_main(self): @@ -665,8 +669,10 @@ async def execute_request(self, socket, ident, parent): reply_content = await reply_content # Flush output before sending the reply. - sys.stdout.flush() - sys.stderr.flush() + if sys.stdout is not None: + sys.stdout.flush() + if sys.stderr is not None: + sys.stderr.flush() # FIXME: on rare occasions, the flush doesn't seem to make it to the # clients... This seems to mitigate the problem, but we definitely need # to better understand what's going on. 
@@ -997,8 +1003,10 @@ async def apply_request(self, socket, ident, parent): # pragma: no cover reply_content, result_buf = self.do_apply(content, bufs, msg_id, md) # flush i/o - sys.stdout.flush() - sys.stderr.flush() + if sys.stdout is not None: + sys.stdout.flush() + if sys.stderr is not None: + sys.stderr.flush() md = self.finish_metadata(parent, md, reply_content) if not self.session: @@ -1136,8 +1144,10 @@ def raw_input(self, prompt=""): def _input_request(self, prompt, ident, parent, password=False): # Flush output before making the request. - sys.stderr.flush() - sys.stdout.flush() + if sys.stdout is not None: + sys.stdout.flush() + if sys.stderr is not None: + sys.stderr.flush() # flush the stdin socket, to purge stale replies while True: From b47db6f082ea61e9688b4eca4e92529c1e0e6c45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miro=20Hron=C4=8Dok?= Date: Mon, 1 Jul 2024 15:37:07 +0200 Subject: [PATCH 13/97] Avoid a DeprecationWarning on Python 3.13+ (#1248) --- ipykernel/jsonutil.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipykernel/jsonutil.py b/ipykernel/jsonutil.py index 6a463cf1b..e45f06e53 100644 --- a/ipykernel/jsonutil.py +++ b/ipykernel/jsonutil.py @@ -26,7 +26,7 @@ # holy crap, strptime is not threadsafe. # Calling it once at import seems to help. 
-datetime.strptime("1", "%d") +datetime.strptime("2000-01-01", "%Y-%m-%d") # ----------------------------------------------------------------------------- # Classes and functions From 864f3814c9555b9a6e8213013859e61c54241df4 Mon Sep 17 00:00:00 2001 From: Steve Kowalik Date: Mon, 1 Jul 2024 23:38:13 +1000 Subject: [PATCH 14/97] Catch IPython 8.24 DeprecationWarnings (#1242) --- pyproject.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyproject.toml b/pyproject.toml index cdf265f63..d94815649 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -172,6 +172,8 @@ filterwarnings= [ # IPython warnings "ignore: `Completer.complete` is pending deprecation since IPython 6.0 and will be replaced by `Completer.completions`:PendingDeprecationWarning", + "ignore: backends is deprecated since IPython 8.24, backends are managed in matplotlib and can be externally registered.:DeprecationWarning", + "ignore: backend2gui is deprecated since IPython 8.24, backends are managed in matplotlib and can be externally registered.:DeprecationWarning", # Ignore jupyter_client warnings "ignore:unclosed Date: Sat, 6 Jul 2024 20:05:38 -0500 Subject: [PATCH 15/97] Build docs on ubuntu (#1257) --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e3b56ba25..233aabbda 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -98,7 +98,7 @@ jobs: token: ${{ secrets.GITHUB_TOKEN }} test_docs: - runs-on: windows-latest + runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 From a4ea0e54491e9b0fd2a34c54ae1d74cbd0c00304 Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Tue, 6 Aug 2024 09:35:03 +0100 Subject: [PATCH 16/97] Forward port changelog for 6.29.4 and 5 to main branch (#1263) --- CHANGELOG.md | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/CHANGELOG.md 
b/CHANGELOG.md index 62b5d716e..a4a8d0a0c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,44 @@ +## 6.29.5 + +([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.29.4...1e62d48298e353a9879fae99bc752f9bb48797ef)) + +### Bugs fixed + +- Fix use of "%matplotlib osx" [#1237](https://github.com/ipython/ipykernel/pull/1237) ([@ianthomas23](https://github.com/ianthomas23)) + +### Maintenance and upkeep improvements + +- \[6.x\] Update Release Scripts [#1251](https://github.com/ipython/ipykernel/pull/1251) ([@blink1073](https://github.com/blink1073)) + +### Contributors to this release + +([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2024-03-27&to=2024-06-29&type=c)) + +[@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2024-03-27..2024-06-29&type=Issues) | [@ianthomas23](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aianthomas23+updated%3A2024-03-27..2024-06-29&type=Issues) + + + +## 6.29.4 + +([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.29.3...1cea5332ffc37f32e8232fd2b8b8ddd91b2bbdcf)) + +### Bugs fixed + +- Fix side effect import for pickleutil [#1216](https://github.com/ipython/ipykernel/pull/1216) ([@blink1073](https://github.com/blink1073)) + +### Maintenance and upkeep improvements + +- Do not import debugger/debugpy unless needed [#1223](https://github.com/ipython/ipykernel/pull/1223) ([@krassowski](https://github.com/krassowski)) + +### Contributors to this release + +([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2024-02-26&to=2024-03-27&type=c)) + +[@agronholm](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aagronholm+updated%3A2024-02-26..2024-03-27&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2024-02-26..2024-03-27&type=Issues) | 
[@davidbrochart](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Adavidbrochart+updated%3A2024-02-26..2024-03-27&type=Issues) | [@krassowski](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Akrassowski+updated%3A2024-02-26..2024-03-27&type=Issues) | [@minrk](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aminrk+updated%3A2024-02-26..2024-03-27&type=Issues) + ## 6.29.3 ([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.29.2...de2221ce155668c343084fde37b77fb6b1671dc9)) From 1de2fb2b8c6de4a11961e4adbe0ee1afbb5da5d2 Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Tue, 6 Aug 2024 10:08:40 +0100 Subject: [PATCH 17/97] Ignore warning from trio (#1262) --- pyproject.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index d94815649..9d9ebd618 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -181,9 +181,12 @@ filterwarnings= [ "ignore:There is no current event loop:DeprecationWarning", "ignore:zmq.eventloop.ioloop is deprecated in pyzmq 17. pyzmq now works with default tornado and asyncio eventloops.:DeprecationWarning", "module:Jupyter is migrating its paths to use standard platformdirs:DeprecationWarning", - "ignore:trio.MultiError is deprecated since Trio 0.22.0:trio.TrioDeprecationWarning", + # Ignore datetime warning. 
"ignore:datetime.datetime.utc:DeprecationWarning", + + # https://github.com/python-trio/trio/issues/3053 + "ignore:The `hash` argument is deprecated in favor of `unsafe_hash` and will be removed in or after August 2025.", ] [tool.coverage.report] From 2b925be9704e47065b93ce852fa1dd006e9e71ce Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Fri, 9 Aug 2024 10:26:34 +0100 Subject: [PATCH 18/97] Specify argtypes when using macos msg (#1264) --- ipykernel/_eventloop_macos.py | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/ipykernel/_eventloop_macos.py b/ipykernel/_eventloop_macos.py index 3a6692fce..c55546fe2 100644 --- a/ipykernel/_eventloop_macos.py +++ b/ipykernel/_eventloop_macos.py @@ -17,7 +17,6 @@ objc.objc_getClass.restype = void_p objc.sel_registerName.restype = void_p objc.objc_msgSend.restype = void_p -objc.objc_msgSend.argtypes = [void_p, void_p] msg = objc.objc_msgSend @@ -80,11 +79,25 @@ def C(classname): def _NSApp(): """Return the global NSApplication instance (NSApp)""" + objc.objc_msgSend.argtypes = [void_p, void_p] return msg(C("NSApplication"), n("sharedApplication")) def _wake(NSApp): """Wake the Application""" + objc.objc_msgSend.argtypes = [ + void_p, + void_p, + void_p, + void_p, + void_p, + void_p, + void_p, + void_p, + void_p, + void_p, + void_p, + ] event = msg( C("NSEvent"), n( @@ -101,6 +114,7 @@ def _wake(NSApp): 0, # data1 0, # data2 ) + objc.objc_msgSend.argtypes = [void_p, void_p, void_p, void_p] msg(NSApp, n("postEvent:atStart:"), void_p(event), True) @@ -113,7 +127,9 @@ def stop(timer=None, loop=None): NSApp = _NSApp() # if NSApp is not running, stop CFRunLoop directly, # otherwise stop and wake NSApp + objc.objc_msgSend.argtypes = [void_p, void_p] if msg(NSApp, n("isRunning")): + objc.objc_msgSend.argtypes = [void_p, void_p, void_p] msg(NSApp, n("stop:"), NSApp) _wake(NSApp) else: @@ -148,6 +164,7 @@ def mainloop(duration=1): _triggered.clear() NSApp = _NSApp() _stop_after(duration) + 
objc.objc_msgSend.argtypes = [void_p, void_p] msg(NSApp, n("run")) if not _triggered.is_set(): # app closed without firing callback, From 1ca8f2c68e24f2be30788da858c08ae3e2ca452c Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Fri, 16 Aug 2024 09:10:02 +0100 Subject: [PATCH 19/97] Fix eventloop integration with anyio (#1265) --- ipykernel/eventloops.py | 22 +++++++++++++++------- ipykernel/kernelapp.py | 10 ++++++++-- ipykernel/kernelbase.py | 22 ++++++++++++---------- pyproject.toml | 2 +- tests/test_kernelapp.py | 3 ++- 5 files changed, 38 insertions(+), 21 deletions(-) diff --git a/ipykernel/eventloops.py b/ipykernel/eventloops.py index 4c3a18cbc..baca4dcdb 100644 --- a/ipykernel/eventloops.py +++ b/ipykernel/eventloops.py @@ -93,11 +93,11 @@ def process_stream_events(): # due to our consuming of the edge-triggered FD # flush returns the number of events consumed. # if there were any, wake it up - if kernel.shell_stream.flush(limit=1): + if (kernel.shell_socket.get(zmq.EVENTS) & zmq.POLLIN) > 0: exit_loop() if not hasattr(kernel, "_qt_notifier"): - fd = kernel.shell_stream.getsockopt(zmq.FD) + fd = kernel.shell_socket.getsockopt(zmq.FD) kernel._qt_notifier = QtCore.QSocketNotifier( fd, enum_helper("QtCore.QSocketNotifier.Type").Read, kernel.app.qt_event_loop ) @@ -179,7 +179,7 @@ def loop_wx(kernel): def wake(): """wake from wx""" - if kernel.shell_stream.flush(limit=1): + if (kernel.shell_socket.get(zmq.EVENTS) & zmq.POLLIN) > 0: kernel.app.ExitMainLoop() return @@ -248,14 +248,14 @@ def __init__(self, app): def exit_loop(): """fall back to main loop""" - app.tk.deletefilehandler(kernel.shell_stream.getsockopt(zmq.FD)) + app.tk.deletefilehandler(kernel.shell_socket.getsockopt(zmq.FD)) app.quit() app.destroy() del kernel.app_wrapper def process_stream_events(*a, **kw): """fall back to main loop when there's a socket event""" - if kernel.shell_stream.flush(limit=1): + if (kernel.shell_socket.get(zmq.EVENTS) & zmq.POLLIN) > 0: exit_loop() # allow for scheduling 
exits from the loop in case a timeout needs to @@ -269,7 +269,7 @@ def _schedule_exit(delay): # For Tkinter, we create a Tk object and call its withdraw method. kernel.app_wrapper = BasicAppWrapper(app) app.tk.createfilehandler( - kernel.shell_stream.getsockopt(zmq.FD), READABLE, process_stream_events + kernel.shell_socket.getsockopt(zmq.FD), READABLE, process_stream_events ) # schedule initial call after start app.after(0, process_stream_events) @@ -377,7 +377,7 @@ def handle_int(etype, value, tb): # don't let interrupts during mainloop invoke crash_handler: sys.excepthook = handle_int mainloop(kernel._poll_interval) - if kernel.shell_stream.flush(limit=1): + if (kernel.shell_socket.get(zmq.EVENTS) & zmq.POLLIN) > 0: # events to process, return control to kernel return except BaseException: @@ -604,3 +604,11 @@ def enable_gui(gui, kernel=None): kernel.eventloop = loop # We set `eventloop`; the function the user chose is executed in `Kernel.enter_eventloop`, thus # any exceptions raised during the event loop will not be shown in the client. + + # If running in async loop then set anyio event to trigger starting the eventloop. + # If not running in async loop do nothing as this will be handled in IPKernelApp.main(). 
+ try: + kernel._eventloop_set.set() + except RuntimeError: + # Expecting sniffio.AsyncLibraryNotFoundError but don't want to import sniffio just for that + pass diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 98b08b845..c02c3cf32 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -729,12 +729,18 @@ def start(self) -> None: run(self.main, backend=backend) return + async def _wait_to_enter_eventloop(self): + await self.kernel._eventloop_set.wait() + await self.kernel.enter_eventloop() + async def main(self): async with create_task_group() as tg: - if self.kernel.eventloop: - tg.start_soon(self.kernel.enter_eventloop) + tg.start_soon(self._wait_to_enter_eventloop) tg.start_soon(self.kernel.start) + if self.kernel.eventloop: + self.kernel._eventloop_set.set() + def stop(self): """Stop the kernel, thread-safe.""" self.kernel.stop() diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index e507964b2..e5f5c1eec 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -36,7 +36,7 @@ import psutil import zmq -from anyio import TASK_STATUS_IGNORED, create_task_group, sleep, to_thread +from anyio import TASK_STATUS_IGNORED, Event, create_task_group, sleep, to_thread from anyio.abc import TaskStatus from IPython.core.error import StdinNotImplementedError from jupyter_client.session import Session @@ -229,6 +229,8 @@ def _parent_header(self): "usage_request", ] + _eventloop_set: Event = Event() + def __init__(self, **kwargs): """Initialize the kernel.""" super().__init__(**kwargs) @@ -321,7 +323,9 @@ async def enter_eventloop(self): # record handle, so we can check when this changes eventloop = self.eventloop if eventloop is None: - self.log.info("Exiting as there is no eventloop") + # Do not warn if shutting down. 
+ if not (hasattr(self, "shell") and self.shell.exit_now): + self.log.info("Exiting as there is no eventloop") return async def advance_eventloop(): @@ -335,21 +339,15 @@ async def advance_eventloop(): except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel self.log.error("KeyboardInterrupt caught in kernel") - if self.eventloop is eventloop: - # schedule advance again - await schedule_next() - async def schedule_next(): - """Schedule the next advance of the eventloop""" + # begin polling the eventloop + while self.eventloop is eventloop: # flush the eventloop every so often, # giving us a chance to handle messages in the meantime self.log.debug("Scheduling eventloop advance") await sleep(0.001) await advance_eventloop() - # begin polling the eventloop - await schedule_next() - _message_counter = Any( help="""Monotonic counter of messages """, @@ -481,6 +479,10 @@ async def start(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None: tg.start_soon(self.shell_main) def stop(self): + if not self._eventloop_set.is_set(): + # Stop the async task that is waiting for the eventloop to be set. 
+ self._eventloop_set.set() + self.shell_stop.set() self.control_stop.set() diff --git a/pyproject.toml b/pyproject.toml index 9d9ebd618..aeaeef463 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,7 @@ dependencies = [ "pyzmq>=25.0", "psutil>=5.7", "packaging>=22", - "anyio>=4.0.0", + "anyio>=4.2.0", ] [project.urls] diff --git a/tests/test_kernelapp.py b/tests/test_kernelapp.py index 6b9f451b9..05f6e5579 100644 --- a/tests/test_kernelapp.py +++ b/tests/test_kernelapp.py @@ -117,7 +117,8 @@ def test_merge_connection_file(): os.remove(cf) -@pytest.mark.skipif(trio is None, reason="requires trio") +# FIXME: @pytest.mark.skipif(trio is None, reason="requires trio") +@pytest.mark.skip() def test_trio_loop(): app = IPKernelApp(trio_loop=True) From 0fa543936d3f9ee69f6b9f8bd5a316692fde9aaa Mon Sep 17 00:00:00 2001 From: David Brochart Date: Tue, 10 Sep 2024 15:13:53 +0200 Subject: [PATCH 20/97] Remove direct use of asyncio (#1266) --- ipykernel/iostream.py | 2 -- ipykernel/kernelbase.py | 3 +-- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index ea70831b8..6280905cd 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -3,7 +3,6 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. -import asyncio import atexit import contextvars import io @@ -99,7 +98,6 @@ def __init__(self, socket, pipe=False): self._event_pipes: Dict[threading.Thread, Any] = {} self._event_pipe_gc_lock: threading.Lock = threading.Lock() self._event_pipe_gc_seconds: float = 10 - self._event_pipe_gc_task: Optional[asyncio.Task[Any]] = None self._setup_event_pipe() tasks = [self._handle_event, self._run_event_pipe_gc] if pipe: diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index e5f5c1eec..050f57bee 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -4,7 +4,6 @@ # Distributed under the terms of the Modified BSD License. 
from __future__ import annotations -import asyncio import inspect import itertools import logging @@ -1258,7 +1257,7 @@ async def _progressively_terminate_all_children(self): delay, children, ) - await asyncio.sleep(delay) + await sleep(delay) async def _at_shutdown(self): """Actions taken at shutdown by the kernel, called by python's atexit.""" From 3089438d84447bf859556b89f528ef59332803c5 Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Thu, 3 Oct 2024 10:39:39 +0100 Subject: [PATCH 21/97] Kernel subshells (JEP91) implementation (#1249) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- docs/api/ipykernel.rst | 24 +++ ipykernel/control.py | 35 +---- ipykernel/heartbeat.py | 2 +- ipykernel/iostream.py | 10 +- ipykernel/kernelapp.py | 8 + ipykernel/kernelbase.py | 202 ++++++++++++++++++++---- ipykernel/shellchannel.py | 34 ++++ ipykernel/subshell.py | 36 +++++ ipykernel/subshell_manager.py | 283 ++++++++++++++++++++++++++++++++++ ipykernel/thread.py | 42 +++++ ipykernel/zmqshell.py | 37 ++++- tests/test_ipkernel_direct.py | 4 + tests/test_kernel_direct.py | 4 + tests/test_message_spec.py | 43 ++++++ tests/test_subshells.py | 269 ++++++++++++++++++++++++++++++++ tests/utils.py | 23 +++ 16 files changed, 988 insertions(+), 68 deletions(-) create mode 100644 ipykernel/shellchannel.py create mode 100644 ipykernel/subshell.py create mode 100644 ipykernel/subshell_manager.py create mode 100644 ipykernel/thread.py create mode 100644 tests/test_subshells.py diff --git a/docs/api/ipykernel.rst b/docs/api/ipykernel.rst index 2e1cf20d8..dd46d0842 100644 --- a/docs/api/ipykernel.rst +++ b/docs/api/ipykernel.rst @@ -110,6 +110,30 @@ Submodules :show-inheritance: +.. automodule:: ipykernel.shellchannel + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: ipykernel.subshell + :members: + :undoc-members: + :show-inheritance: + + +.. 
automodule:: ipykernel.subshell_manager + :members: + :undoc-members: + :show-inheritance: + + +.. automodule:: ipykernel.thread + :members: + :undoc-members: + :show-inheritance: + + .. automodule:: ipykernel.trio_runner :members: :undoc-members: diff --git a/ipykernel/control.py b/ipykernel/control.py index a70377c03..21d6d9962 100644 --- a/ipykernel/control.py +++ b/ipykernel/control.py @@ -1,40 +1,11 @@ """A thread for a control channel.""" -from threading import Event, Thread -from anyio import create_task_group, run, to_thread +from .thread import CONTROL_THREAD_NAME, BaseThread -CONTROL_THREAD_NAME = "Control" - -class ControlThread(Thread): +class ControlThread(BaseThread): """A thread for a control channel.""" def __init__(self, **kwargs): """Initialize the thread.""" - Thread.__init__(self, name=CONTROL_THREAD_NAME, **kwargs) - self.pydev_do_not_trace = True - self.is_pydev_daemon_thread = True - self.__stop = Event() - self._task = None - - def set_task(self, task): - self._task = task - - def run(self): - """Run the thread.""" - self.name = CONTROL_THREAD_NAME - run(self._main) - - async def _main(self): - async with create_task_group() as tg: - if self._task is not None: - tg.start_soon(self._task) - await to_thread.run_sync(self.__stop.wait) - tg.cancel_scope.cancel() - - def stop(self): - """Stop the thread. - - This method is threadsafe. 
- """ - self.__stop.set() + super().__init__(name=CONTROL_THREAD_NAME, **kwargs) diff --git a/ipykernel/heartbeat.py b/ipykernel/heartbeat.py index d2890f672..9816959dd 100644 --- a/ipykernel/heartbeat.py +++ b/ipykernel/heartbeat.py @@ -32,7 +32,7 @@ def __init__(self, context, addr=None): """Initialize the heartbeat thread.""" if addr is None: addr = ("tcp", localhost(), 0) - Thread.__init__(self, name="Heartbeat") + super().__init__(name="Heartbeat") self.context = context self.transport, self.ip, self.port = addr self.original_port = self.port diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index 6280905cd..beca44b16 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -40,7 +40,7 @@ class _IOPubThread(Thread): def __init__(self, tasks, **kwargs): """Initialize the thread.""" - Thread.__init__(self, name="IOPub", **kwargs) + super().__init__(name="IOPub", **kwargs) self._tasks = tasks self.pydev_do_not_trace = True self.is_pydev_daemon_thread = True @@ -170,10 +170,10 @@ async def _handle_event(self): for _ in range(n_events): event_f = self._events.popleft() event_f() - except Exception as e: + except Exception: if self.thread.__stop.is_set(): return - raise e + raise def _setup_pipe_in(self): """setup listening pipe for IOPub from forked subprocesses""" @@ -202,10 +202,10 @@ async def _handle_pipe_msgs(self): try: while True: await self._handle_pipe_msg() - except Exception as e: + except Exception: if self.thread.__stop.is_set(): return - raise e + raise async def _handle_pipe_msg(self, msg=None): """handle a pipe message from a subprocess""" diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index c02c3cf32..2f462af4c 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -53,6 +53,7 @@ from .iostream import IOPubThread from .ipkernel import IPythonKernel from .parentpoller import ParentPollerUnix, ParentPollerWindows +from .shellchannel import ShellChannelThread from .zmqshell import ZMQInteractiveShell # 
----------------------------------------------------------------------------- @@ -143,6 +144,7 @@ class IPKernelApp(BaseIPythonApplication, InteractiveShellApp, ConnectionFileMix iopub_socket = Any() iopub_thread = Any() control_thread = Any() + shell_channel_thread = Any() _ports = Dict() @@ -367,6 +369,7 @@ def init_control(self, context): self.control_socket.router_handover = 1 self.control_thread = ControlThread(daemon=True) + self.shell_channel_thread = ShellChannelThread(context, self.shell_socket, daemon=True) def init_iopub(self, context): """Initialize the iopub channel.""" @@ -406,6 +409,10 @@ def close(self): self.log.debug("Closing control thread") self.control_thread.stop() self.control_thread.join() + if self.shell_channel_thread and self.shell_channel_thread.is_alive(): + self.log.debug("Closing shell channel thread") + self.shell_channel_thread.stop() + self.shell_channel_thread.join() if self.debugpy_socket and not self.debugpy_socket.closed: self.debugpy_socket.close() @@ -562,6 +569,7 @@ def init_kernel(self): debug_shell_socket=self.debug_shell_socket, shell_socket=self.shell_socket, control_thread=self.control_thread, + shell_channel_thread=self.shell_channel_thread, iopub_thread=self.iopub_thread, iopub_socket=self.iopub_socket, stdin_socket=self.stdin_socket, diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 050f57bee..99358f9b6 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -18,7 +18,7 @@ from datetime import datetime from signal import SIGINT, SIGTERM, Signals -from .control import CONTROL_THREAD_NAME +from .thread import CONTROL_THREAD_NAME if sys.platform != "win32": from signal import SIGKILL @@ -103,6 +103,7 @@ class Kernel(SingletonConfigurable): debug_shell_socket = Any() control_thread = Any() + shell_channel_thread = Any() iopub_socket = Any() iopub_thread = Any() stdin_socket = Any() @@ -226,6 +227,9 @@ def _parent_header(self): "abort_request", "debug_request", "usage_request", + 
"create_subshell_request", + "delete_subshell_request", + "list_subshell_request", ] _eventloop_set: Event = Event() @@ -258,16 +262,17 @@ async def process_control(self): try: while True: await self.process_control_message() - except BaseException as e: - print("base exception") + except BaseException: if self.control_stop.is_set(): return - raise e + raise async def process_control_message(self, msg=None): """dispatch control requests""" assert self.control_socket is not None assert self.session is not None + assert self.control_thread is None or threading.current_thread() == self.control_thread + msg = msg or await self.control_socket.recv_multipart() copy = not isinstance(msg[0], zmq.Message) idents, msg = self.session.feed_identities(msg, copy=copy) @@ -356,28 +361,95 @@ async def advance_eventloop(): def _message_counter_default(self): return itertools.count() - async def shell_main(self): - async with create_task_group() as tg: - tg.start_soon(self.process_shell) - await to_thread.run_sync(self.shell_stop.wait) - tg.cancel_scope.cancel() + async def shell_channel_thread_main(self): + """Main loop for shell channel thread. + + Listen for incoming messages on kernel shell_socket. For each message + received, extract the subshell_id from the message header and forward the + message to the correct subshell via ZMQ inproc pair socket. + """ + assert self.shell_socket is not None + assert self.session is not None + assert self.shell_channel_thread is not None + assert threading.current_thread() == self.shell_channel_thread - async def process_shell(self): try: while True: - await self.process_shell_message() - except BaseException as e: + msg = await self.shell_socket.recv_multipart() + + # Deserialize whole message just to get subshell_id. + # Keep original message to send to subshell_id unmodified. + # Ideally only want to deserialize message once. 
+ copy = not isinstance(msg[0], zmq.Message) + _, msg2 = self.session.feed_identities(msg, copy=copy) + try: + msg3 = self.session.deserialize(msg2, content=False, copy=copy) + subshell_id = msg3["header"].get("subshell_id") + + # Find inproc pair socket to use to send message to correct subshell. + socket = self.shell_channel_thread.manager.get_shell_channel_socket(subshell_id) + assert socket is not None + socket.send_multipart(msg, copy=False) + except Exception: + self.log.error("Invalid message", exc_info=True) # noqa: G201 + except BaseException: if self.shell_stop.is_set(): return - raise e + raise - async def process_shell_message(self, msg=None): - assert self.shell_socket is not None + async def shell_main(self, subshell_id: str | None): + """Main loop for a single subshell.""" + if self._supports_kernel_subshells: + if subshell_id is None: + assert threading.current_thread() == threading.main_thread() + else: + assert threading.current_thread() not in ( + self.shell_channel_thread, + threading.main_thread(), + ) + # Inproc pair socket that this subshell uses to talk to shell channel thread. + socket = self.shell_channel_thread.manager.get_other_socket(subshell_id) + else: + assert subshell_id is None + assert threading.current_thread() == threading.main_thread() + socket = self.shell_socket + + async with create_task_group() as tg: + tg.start_soon(self.process_shell, socket) + if subshell_id is None: + # Main subshell. + await to_thread.run_sync(self.shell_stop.wait) + tg.cancel_scope.cancel() + + async def process_shell(self, socket=None): + # socket=None is valid if kernel subshells are not supported. + try: + while True: + await self.process_shell_message(socket=socket) + except BaseException: + if self.shell_stop.is_set(): + return + raise + + async def process_shell_message(self, msg=None, socket=None): + # If socket is None kernel subshells are not supported so use socket=shell_socket. + # If msg is set, process that message. 
+ # If msg is None, await the next message to arrive on the socket. assert self.session is not None + if self._supports_kernel_subshells: + assert threading.current_thread() not in ( + self.control_thread, + self.shell_channel_thread, + ) + assert socket is not None + else: + assert threading.current_thread() == threading.main_thread() + assert socket is None + socket = self.shell_socket - no_msg = msg is None if self._is_test else not await self.shell_socket.poll(0) + no_msg = msg is None if self._is_test else not await socket.poll(0) + msg = msg or await socket.recv_multipart(copy=False) - msg = msg or await self.shell_socket.recv_multipart() received_time = time.monotonic() copy = not isinstance(msg[0], zmq.Message) idents, msg = self.session.feed_identities(msg, copy=copy) @@ -401,7 +473,7 @@ async def process_shell_message(self, msg=None): elif received_time - self._aborted_time > self.stop_on_error_timeout: self._aborting = False if self._aborting: - await self._send_abort_reply(self.shell_socket, msg, idents) + await self._send_abort_reply(socket, msg, idents) self._publish_status("idle", "shell") return @@ -411,7 +483,7 @@ async def process_shell_message(self, msg=None): self.log.debug("\n*** MESSAGE TYPE:%s***", msg_type) self.log.debug(" Content: %s\n --->\n ", msg["content"]) - if not await self.should_handle(self.shell_socket, msg, idents): + if not await self.should_handle(socket, msg, idents): return handler = self.shell_handlers.get(msg_type) @@ -424,7 +496,7 @@ async def process_shell_message(self, msg=None): except Exception: self.log.debug("Unable to signal in pre_handler_hook:", exc_info=True) try: - result = handler(self.shell_socket, idents, msg) + result = handler(socket, idents, msg) if inspect.isawaitable(result): await result except Exception: @@ -465,7 +537,7 @@ async def start(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None: self.control_stop = threading.Event() if not self._is_test and self.control_socket is not None: if 
self.control_thread: - self.control_thread.set_task(self.control_main) + self.control_thread.add_task(self.control_main) self.control_thread.start() else: tg.start_soon(self.control_main) @@ -474,8 +546,19 @@ async def start(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None: self.shell_is_awaiting = False self.shell_is_blocking = False self.shell_stop = threading.Event() - if not self._is_test and self.shell_socket is not None: - tg.start_soon(self.shell_main) + + if self.shell_channel_thread: + tg.start_soon(self.shell_main, None) + + # Assign tasks to and start shell channel thread. + manager = self.shell_channel_thread.manager + self.shell_channel_thread.add_task(self.shell_channel_thread_main) + self.shell_channel_thread.add_task(manager.listen_from_control, self.shell_main) + self.shell_channel_thread.add_task(manager.listen_from_subshells) + self.shell_channel_thread.start() + else: + if not self._is_test and self.shell_socket is not None: + tg.start_soon(self.shell_main, None) def stop(self): if not self._eventloop_set.is_set(): @@ -635,8 +718,7 @@ async def execute_request(self, socket, ident, parent): cell_meta = parent.get("metadata", {}) cell_id = cell_meta.get("cellId") except Exception: - self.log.error("Got bad msg: ") - self.log.error("%s", parent) + self.log.error("Got bad msg from parent: %s", parent) return stop_on_error = content.get("stop_on_error", True) @@ -687,8 +769,8 @@ async def execute_request(self, socket, ident, parent): reply_msg = self.session.send( socket, "execute_reply", - reply_content, - parent, + content=reply_content, + parent=parent, metadata=metadata, ident=ident, ) @@ -806,14 +888,18 @@ async def connect_request(self, socket, ident, parent): @property def kernel_info(self): - return { + info = { "protocol_version": kernel_protocol_version, "implementation": self.implementation, "implementation_version": self.implementation_version, "language_info": self.language_info, "banner": self.banner, "help_links": 
self.help_links, + "supported_features": [], } + if self._supports_kernel_subshells: + info["supported_features"] = ["kernel subshells"] + return info async def kernel_info_request(self, socket, ident, parent): """Handle a kernel info request.""" @@ -984,6 +1070,62 @@ async def usage_request(self, socket, ident, parent): async def do_debug_request(self, msg): raise NotImplementedError + # --------------------------------------------------------------------------- + # Subshell control message handlers + # --------------------------------------------------------------------------- + + async def create_subshell_request(self, socket, ident, parent) -> None: + if not self.session: + return + if not self._supports_kernel_subshells: + self.log.error("Subshells are not supported by this kernel") + return + + # This should only be called in the control thread if it exists. + # Request is passed to shell channel thread to process. + other_socket = self.shell_channel_thread.manager.get_control_other_socket() + await other_socket.send_json({"type": "create"}) + reply = await other_socket.recv_json() + + self.session.send(socket, "create_subshell_reply", reply, parent, ident) + + async def delete_subshell_request(self, socket, ident, parent) -> None: + if not self.session: + return + if not self._supports_kernel_subshells: + self.log.error("KERNEL SUBSHELLS NOT SUPPORTED") + return + + try: + content = parent["content"] + subshell_id = content["subshell_id"] + except Exception: + self.log.error("Got bad msg from parent: %s", parent) + return + + # This should only be called in the control thread if it exists. + # Request is passed to shell channel thread to process. 
+ other_socket = self.shell_channel_thread.manager.get_control_other_socket() + await other_socket.send_json({"type": "delete", "subshell_id": subshell_id}) + reply = await other_socket.recv_json() + + self.session.send(socket, "delete_subshell_reply", reply, parent, ident) + + async def list_subshell_request(self, socket, ident, parent) -> None: + if not self.session: + return + if not self._supports_kernel_subshells: + self.log.error("Subshells are not supported by this kernel") + return + + # This should only be called in the control thread if it exists. + # Request is passed to shell channel thread to process. + other_socket = self.shell_channel_thread.manager.get_control_other_socket() + await other_socket.send_json({"type": "list"}) + reply = await other_socket.recv_json() + + self.session.send(socket, "list_subshell_reply", reply, parent, ident) + # --------------------------------------------------------------------------- # Engine methods (DEPRECATED) # --------------------------------------------------------------------------- @@ -1274,3 +1416,7 @@ async def _at_shutdown(self): ident=self._topic("shutdown"), ) self.log.debug("%s", self._shutdown_message) + + @property + def _supports_kernel_subshells(self): + return self.shell_channel_thread is not None diff --git a/ipykernel/shellchannel.py b/ipykernel/shellchannel.py new file mode 100644 index 000000000..bc0459c46 --- /dev/null +++ b/ipykernel/shellchannel.py @@ -0,0 +1,34 @@ +"""A thread for a shell channel.""" +import zmq.asyncio + +from .subshell_manager import SubshellManager +from .thread import SHELL_CHANNEL_THREAD_NAME, BaseThread + + +class ShellChannelThread(BaseThread): + """A thread for a shell channel. + + Communicates with shell/subshell threads via pairs of ZMQ inproc sockets. 
+ """ + + def __init__(self, context: zmq.asyncio.Context, shell_socket: zmq.asyncio.Socket, **kwargs): + """Initialize the thread.""" + super().__init__(name=SHELL_CHANNEL_THREAD_NAME, **kwargs) + self._manager: SubshellManager | None = None + self._context = context + self._shell_socket = shell_socket + + @property + def manager(self) -> SubshellManager: + # Lazy initialisation. + if self._manager is None: + self._manager = SubshellManager(self._context, self._shell_socket) + return self._manager + + def run(self) -> None: + """Run the thread.""" + try: + super().run() + finally: + if self._manager: + self._manager.close() diff --git a/ipykernel/subshell.py b/ipykernel/subshell.py new file mode 100644 index 000000000..18e15ab38 --- /dev/null +++ b/ipykernel/subshell.py @@ -0,0 +1,36 @@ +"""A thread for a subshell.""" + +from threading import current_thread + +import zmq.asyncio + +from .thread import BaseThread + + +class SubshellThread(BaseThread): + """A thread for a subshell.""" + + def __init__(self, subshell_id: str, **kwargs): + """Initialize the thread.""" + super().__init__(name=f"subshell-{subshell_id}", **kwargs) + + # Inproc PAIR socket, for communication with shell channel thread. + self._pair_socket: zmq.asyncio.Socket | None = None + + async def create_pair_socket(self, context: zmq.asyncio.Context, address: str) -> None: + """Create inproc PAIR socket, for communication with shell channel thread. + + Should be called from this thread, so usually via add_task before the + thread is started. 
+ """ + assert current_thread() == self + self._pair_socket = context.socket(zmq.PAIR) + self._pair_socket.connect(address) + + def run(self) -> None: + try: + super().run() + finally: + if self._pair_socket is not None: + self._pair_socket.close() + self._pair_socket = None diff --git a/ipykernel/subshell_manager.py b/ipykernel/subshell_manager.py new file mode 100644 index 000000000..805d6f812 --- /dev/null +++ b/ipykernel/subshell_manager.py @@ -0,0 +1,283 @@ +"""Manager of subshells in a kernel.""" + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. +from __future__ import annotations + +import typing as t +import uuid +from dataclasses import dataclass +from threading import Lock, current_thread, main_thread + +import zmq +import zmq.asyncio +from anyio import create_memory_object_stream, create_task_group + +from .subshell import SubshellThread +from .thread import SHELL_CHANNEL_THREAD_NAME + + +@dataclass +class Subshell: + thread: SubshellThread + shell_channel_socket: zmq.asyncio.Socket + + +class SubshellManager: + """A manager of subshells. + + Controls the lifetimes of subshell threads and their associated ZMQ sockets. + Runs mostly in the shell channel thread. + + Care needed with threadsafe access here. All write access to the cache occurs in + the shell channel thread so there is only ever one write access at any one time. + Reading of cache information can be performed by other threads, so all reads are + protected by a lock so that they are atomic. + + Sending reply messages via the shell_socket is wrapped by another lock to protect + against multiple subshells attempting to send at the same time. 
+ """ + + def __init__(self, context: zmq.asyncio.Context, shell_socket: zmq.asyncio.Socket): + assert current_thread() == main_thread() + + self._context: zmq.asyncio.Context = context + self._shell_socket = shell_socket + self._cache: dict[str, Subshell] = {} + self._lock_cache = Lock() + self._lock_shell_socket = Lock() + + # Inproc pair sockets for control channel and main shell (parent subshell). + # Each inproc pair has a "shell_channel" socket used in the shell channel + # thread, and an "other" socket used in the other thread. + self._control_shell_channel_socket = self._create_inproc_pair_socket("control", True) + self._control_other_socket = self._create_inproc_pair_socket("control", False) + self._parent_shell_channel_socket = self._create_inproc_pair_socket(None, True) + self._parent_other_socket = self._create_inproc_pair_socket(None, False) + + # anyio memory object stream for async queue-like communication between tasks. + # Used by _create_subshell to tell listen_from_subshells to spawn a new task. + self._send_stream, self._receive_stream = create_memory_object_stream[str]() + + def close(self) -> None: + """Stop all subshells and close all resources.""" + assert current_thread().name == SHELL_CHANNEL_THREAD_NAME + + self._send_stream.close() + self._receive_stream.close() + + for socket in ( + self._control_shell_channel_socket, + self._control_other_socket, + self._parent_shell_channel_socket, + self._parent_other_socket, + ): + if socket is not None: + socket.close() + + with self._lock_cache: + while True: + try: + _, subshell = self._cache.popitem() + except KeyError: + break + self._stop_subshell(subshell) + + def get_control_other_socket(self) -> zmq.asyncio.Socket: + return self._control_other_socket + + def get_other_socket(self, subshell_id: str | None) -> zmq.asyncio.Socket: + """Return the other inproc pair socket for a subshell. + + This socket is accessed from the subshell thread. 
+ """ + if subshell_id is None: + return self._parent_other_socket + with self._lock_cache: + socket = self._cache[subshell_id].thread._pair_socket + assert socket is not None + return socket + + def get_shell_channel_socket(self, subshell_id: str | None) -> zmq.asyncio.Socket: + """Return the shell channel inproc pair socket for a subshell. + + This socket is accessed from the shell channel thread. + """ + if subshell_id is None: + return self._parent_shell_channel_socket + with self._lock_cache: + return self._cache[subshell_id].shell_channel_socket + + def list_subshell(self) -> list[str]: + """Return list of current subshell ids. + + Can be called by any subshell using %subshell magic. + """ + with self._lock_cache: + return list(self._cache) + + async def listen_from_control(self, subshell_task: t.Any) -> None: + """Listen for messages on the control inproc socket, handle those messages and + return replies on the same socket. Runs in the shell channel thread. + """ + assert current_thread().name == SHELL_CHANNEL_THREAD_NAME + + socket = self._control_shell_channel_socket + while True: + request = await socket.recv_json() # type: ignore[misc] + reply = await self._process_control_request(request, subshell_task) + await socket.send_json(reply) # type: ignore[func-returns-value] + + async def listen_from_subshells(self) -> None: + """Listen for reply messages on inproc sockets of all subshells and resend + those messages to the client via the shell_socket. + + Runs in the shell channel thread. + """ + assert current_thread().name == SHELL_CHANNEL_THREAD_NAME + + async with create_task_group() as tg: + tg.start_soon(self._listen_for_subshell_reply, None) + async for subshell_id in self._receive_stream: + tg.start_soon(self._listen_for_subshell_reply, subshell_id) + + def subshell_id_from_thread_id(self, thread_id: int) -> str | None: + """Return subshell_id of the specified thread_id. + + Raises RuntimeError if thread_id is not the main shell or a subshell. 
+ + Only used by %subshell magic so does not have to be fast/cached. + """ + with self._lock_cache: + if thread_id == main_thread().ident: + return None + for id, subshell in self._cache.items(): + if subshell.thread.ident == thread_id: + return id + msg = f"Thread id {thread_id!r} does not correspond to a subshell of this kernel" + raise RuntimeError(msg) + + def _create_inproc_pair_socket( + self, name: str | None, shell_channel_end: bool + ) -> zmq.asyncio.Socket: + """Create and return a single ZMQ inproc pair socket.""" + address = self._get_inproc_socket_address(name) + socket = self._context.socket(zmq.PAIR) + if shell_channel_end: + socket.bind(address) + else: + socket.connect(address) + return socket + + async def _create_subshell(self, subshell_task: t.Any) -> str: + """Create and start a new subshell thread.""" + assert current_thread().name == SHELL_CHANNEL_THREAD_NAME + + subshell_id = str(uuid.uuid4()) + thread = SubshellThread(subshell_id) + + with self._lock_cache: + assert subshell_id not in self._cache + shell_channel_socket = self._create_inproc_pair_socket(subshell_id, True) + self._cache[subshell_id] = Subshell(thread, shell_channel_socket) + + # Tell task running listen_from_subshells to create a new task to listen for + # reply messages from the new subshell to resend to the client. + await self._send_stream.send(subshell_id) + + address = self._get_inproc_socket_address(subshell_id) + thread.add_task(thread.create_pair_socket, self._context, address) + thread.add_task(subshell_task, subshell_id) + thread.start() + + return subshell_id + + def _delete_subshell(self, subshell_id: str) -> None: + """Delete subshell identified by subshell_id. + + Raises key error if subshell_id not in cache. 
+ """ + assert current_thread().name == SHELL_CHANNEL_THREAD_NAME + + with self._lock_cache: + subshell = self._cache.pop(subshell_id) + + self._stop_subshell(subshell) + + def _get_inproc_socket_address(self, name: str | None) -> str: + full_name = f"subshell-{name}" if name else "subshell" + return f"inproc://{full_name}" + + def _get_shell_channel_socket(self, subshell_id: str | None) -> zmq.asyncio.Socket: + if subshell_id is None: + return self._parent_shell_channel_socket + with self._lock_cache: + return self._cache[subshell_id].shell_channel_socket + + def _is_subshell(self, subshell_id: str | None) -> bool: + if subshell_id is None: + return True + with self._lock_cache: + return subshell_id in self._cache + + async def _listen_for_subshell_reply(self, subshell_id: str | None) -> None: + """Listen for reply messages on specified subshell inproc socket and + resend to the client via the shell_socket. + + Runs in the shell channel thread. + """ + assert current_thread().name == SHELL_CHANNEL_THREAD_NAME + + shell_channel_socket = self._get_shell_channel_socket(subshell_id) + + try: + while True: + msg = await shell_channel_socket.recv_multipart(copy=False) + with self._lock_shell_socket: + await self._shell_socket.send_multipart(msg) + except BaseException: + if not self._is_subshell(subshell_id): + # Subshell no longer exists so exit gracefully + return + raise + + async def _process_control_request( + self, request: dict[str, t.Any], subshell_task: t.Any + ) -> dict[str, t.Any]: + """Process a control request message received on the control inproc + socket and return the reply. Runs in the shell channel thread. 
+ """ + assert current_thread().name == SHELL_CHANNEL_THREAD_NAME + + try: + type = request["type"] + reply: dict[str, t.Any] = {"status": "ok"} + + if type == "create": + reply["subshell_id"] = await self._create_subshell(subshell_task) + elif type == "delete": + subshell_id = request["subshell_id"] + self._delete_subshell(subshell_id) + elif type == "list": + reply["subshell_id"] = self.list_subshell() + else: + msg = f"Unrecognised message type {type!r}" + raise RuntimeError(msg) + except BaseException as err: + reply = { + "status": "error", + "evalue": str(err), + } + return reply + + def _stop_subshell(self, subshell: Subshell) -> None: + """Stop a subshell thread and close all of its resources.""" + assert current_thread().name == SHELL_CHANNEL_THREAD_NAME + + thread = subshell.thread + if thread.is_alive(): + thread.stop() + thread.join() + + # Closing the shell_channel_socket terminates the task that is listening on it. + subshell.shell_channel_socket.close() diff --git a/ipykernel/thread.py b/ipykernel/thread.py new file mode 100644 index 000000000..a63011de7 --- /dev/null +++ b/ipykernel/thread.py @@ -0,0 +1,42 @@ +"""Base class for threads.""" +import typing as t +from threading import Event, Thread + +from anyio import create_task_group, run, to_thread + +CONTROL_THREAD_NAME = "Control" +SHELL_CHANNEL_THREAD_NAME = "Shell channel" + + +class BaseThread(Thread): + """Base class for threads.""" + + def __init__(self, **kwargs): + """Initialize the thread.""" + super().__init__(**kwargs) + self.pydev_do_not_trace = True + self.is_pydev_daemon_thread = True + self.__stop = Event() + self._tasks_and_args: t.List[t.Tuple[t.Any, t.Any]] = [] + + def add_task(self, task: t.Any, *args: t.Any) -> None: + # May only add tasks before the thread is started. 
+ self._tasks_and_args.append((task, args)) + + def run(self) -> t.Any: + """Run the thread.""" + return run(self._main) + + async def _main(self) -> None: + async with create_task_group() as tg: + for task, args in self._tasks_and_args: + tg.start_soon(task, *args) + await to_thread.run_sync(self.__stop.wait) + tg.cancel_scope.cancel() + + def stop(self) -> None: + """Stop the thread. + + This method is threadsafe. + """ + self.__stop.set() diff --git a/ipykernel/zmqshell.py b/ipykernel/zmqshell.py index bc99d000d..3f97e8170 100644 --- a/ipykernel/zmqshell.py +++ b/ipykernel/zmqshell.py @@ -16,9 +16,9 @@ import os import sys +import threading import warnings from pathlib import Path -from threading import local from IPython.core import page, payloadpage from IPython.core.autocall import ZMQExitAutocall @@ -69,7 +69,7 @@ def _flush_streams(self): @default("_thread_local") def _default_thread_local(self): """Initialize our thread local storage""" - return local() + return threading.local() @property def _hooks(self): @@ -439,6 +439,39 @@ def autosave(self, arg_s): else: print("Autosave disabled") + @line_magic + def subshell(self, arg_s): + """ + List all current subshells + """ + from ipykernel.kernelapp import IPKernelApp + + if not IPKernelApp.initialized(): + msg = "Not in a running Kernel" + raise RuntimeError(msg) + + app = IPKernelApp.instance() + kernel = app.kernel + + if not getattr(kernel, "_supports_kernel_subshells", False): + print("Kernel does not support subshells") + return + + thread_id = threading.current_thread().ident + manager = kernel.shell_channel_thread.manager + try: + subshell_id = manager.subshell_id_from_thread_id(thread_id) + except RuntimeError: + subshell_id = "unknown" + subshell_id_list = manager.list_subshell() + + print(f"subshell id: {subshell_id}") + print(f"thread id: {thread_id}") + print(f"main thread id: {threading.main_thread().ident}") + print(f"pid: {os.getpid()}") + print(f"thread count: {threading.active_count()}") + 
print(f"subshell list: {subshell_id_list}") + class ZMQInteractiveShell(InteractiveShell): """A subclass of InteractiveShell for ZMQ.""" diff --git a/tests/test_ipkernel_direct.py b/tests/test_ipkernel_direct.py index cea2ec994..dfd0445cf 100644 --- a/tests/test_ipkernel_direct.py +++ b/tests/test_ipkernel_direct.py @@ -27,6 +27,10 @@ async def test_properties(ipkernel: IPythonKernel) -> None: async def test_direct_kernel_info_request(ipkernel): reply = await ipkernel.test_shell_message("kernel_info_request", {}) assert reply["header"]["msg_type"] == "kernel_info_reply" + assert ( + "supported_features" not in reply["content"] + or "kernel subshells" not in reply["content"]["supported_features"] + ) async def test_direct_execute_request(ipkernel: MockIPyKernel) -> None: diff --git a/tests/test_kernel_direct.py b/tests/test_kernel_direct.py index ea3c6fe7e..50801b033 100644 --- a/tests/test_kernel_direct.py +++ b/tests/test_kernel_direct.py @@ -16,6 +16,10 @@ async def test_direct_kernel_info_request(kernel): reply = await kernel.test_shell_message("kernel_info_request", {}) assert reply["header"]["msg_type"] == "kernel_info_reply" + assert ( + "supported_features" not in reply["content"] + or "kernel subshells" not in reply["content"]["supported_features"] + ) async def test_direct_execute_request(kernel): diff --git a/tests/test_message_spec.py b/tests/test_message_spec.py index d98503ee7..694de44b1 100644 --- a/tests/test_message_spec.py +++ b/tests/test_message_spec.py @@ -239,6 +239,21 @@ class HistoryReply(Reply): history = List(List()) +# Subshell control messages + + +class CreateSubshellReply(Reply): + subshell_id = Unicode() + + +class DeleteSubshellReply(Reply): + pass + + +class ListSubshellReply(Reply): + subshell_id = List(Unicode()) + + references = { "execute_reply": ExecuteReply(), "inspect_reply": InspectReply(), @@ -255,6 +270,9 @@ class HistoryReply(Reply): "stream": Stream(), "display_data": DisplayData(), "header": RHeader(), + 
"create_subshell_reply": CreateSubshellReply(), + "delete_subshell_reply": DeleteSubshellReply(), + "list_subshell_reply": ListSubshellReply(), } # ----------------------------------------------------------------------------- @@ -498,6 +516,8 @@ def test_kernel_info_request(): msg_id = KC.kernel_info() reply = get_reply(KC, msg_id, TIMEOUT) validate_message(reply, "kernel_info_reply", msg_id) + assert "supported_features" in reply["content"] + assert "kernel subshells" in reply["content"]["supported_features"] def test_connect_request(): @@ -509,6 +529,29 @@ def test_connect_request(): validate_message(reply, "connect_reply", msg_id) +def test_subshell(): + flush_channels() + + msg = KC.session.msg("create_subshell_request") + KC.control_channel.send(msg) + msg_id = msg["header"]["msg_id"] + reply = get_reply(KC, msg_id, TIMEOUT, channel="control") + validate_message(reply, "create_subshell_reply", msg_id) + subshell_id = reply["content"]["subshell_id"] + + msg = KC.session.msg("list_subshell_request") + KC.control_channel.send(msg) + msg_id = msg["header"]["msg_id"] + reply = get_reply(KC, msg_id, TIMEOUT, channel="control") + validate_message(reply, "list_subshell_reply", msg_id) + + msg = KC.session.msg("delete_subshell_request", {"subshell_id": subshell_id}) + KC.control_channel.send(msg) + msg_id = msg["header"]["msg_id"] + reply = get_reply(KC, msg_id, TIMEOUT, channel="control") + validate_message(reply, "delete_subshell_reply", msg_id) + + @pytest.mark.skipif( version_info < (5, 0), reason="earlier Jupyter Client don't have comm_info", diff --git a/tests/test_subshells.py b/tests/test_subshells.py new file mode 100644 index 000000000..f1328ddad --- /dev/null +++ b/tests/test_subshells.py @@ -0,0 +1,269 @@ +"""Test kernel subshells.""" + +# Copyright (c) IPython Development Team. +# Distributed under the terms of the Modified BSD License. 
+from __future__ import annotations + +import platform +import time +from datetime import datetime, timedelta + +import pytest +from jupyter_client.blocking.client import BlockingKernelClient + +from .utils import TIMEOUT, get_replies, get_reply, new_kernel + +# Helpers + + +def create_subshell_helper(kc: BlockingKernelClient): + msg = kc.session.msg("create_subshell_request") + kc.control_channel.send(msg) + msg_id = msg["header"]["msg_id"] + reply = get_reply(kc, msg_id, TIMEOUT, channel="control") + return reply["content"] + + +def delete_subshell_helper(kc: BlockingKernelClient, subshell_id: str): + msg = kc.session.msg("delete_subshell_request", {"subshell_id": subshell_id}) + kc.control_channel.send(msg) + msg_id = msg["header"]["msg_id"] + reply = get_reply(kc, msg_id, TIMEOUT, channel="control") + return reply["content"] + + +def list_subshell_helper(kc: BlockingKernelClient): + msg = kc.session.msg("list_subshell_request") + kc.control_channel.send(msg) + msg_id = msg["header"]["msg_id"] + reply = get_reply(kc, msg_id, TIMEOUT, channel="control") + return reply["content"] + + +def execute_request_subshell_id( + kc: BlockingKernelClient, code: str, subshell_id: str | None, terminator: str = "\n" +): + msg = kc.session.msg("execute_request", {"code": code}) + msg["header"]["subshell_id"] = subshell_id + msg_id = msg["msg_id"] + kc.shell_channel.send(msg) + stdout = "" + while True: + msg = kc.get_iopub_msg() + # Get the stream messages corresponding to msg_id + if ( + msg["msg_type"] == "stream" + and msg["parent_header"]["msg_id"] == msg_id + and msg["content"]["name"] == "stdout" + ): + stdout += msg["content"]["text"] + if stdout.endswith(terminator): + break + return stdout.strip() + + +def execute_thread_count(kc: BlockingKernelClient) -> int: + code = "import threading as t; print(t.active_count())" + return int(execute_request_subshell_id(kc, code, None)) + + +def execute_thread_ids(kc: BlockingKernelClient, subshell_id: str | None = None) -> 
tuple[str, str]: + code = "import threading as t; print(t.get_ident(), t.main_thread().ident)" + return execute_request_subshell_id(kc, code, subshell_id).split() + + +# Tests + + +def test_supported(): + with new_kernel() as kc: + msg_id = kc.kernel_info() + reply = get_reply(kc, msg_id, TIMEOUT) + assert "supported_features" in reply["content"] + assert "kernel subshells" in reply["content"]["supported_features"] + + +def test_subshell_id_lifetime(): + with new_kernel() as kc: + assert list_subshell_helper(kc)["subshell_id"] == [] + subshell_id = create_subshell_helper(kc)["subshell_id"] + assert list_subshell_helper(kc)["subshell_id"] == [subshell_id] + delete_subshell_helper(kc, subshell_id) + assert list_subshell_helper(kc)["subshell_id"] == [] + + +def test_delete_non_existent(): + with new_kernel() as kc: + reply = delete_subshell_helper(kc, "unknown_subshell_id") + assert reply["status"] == "error" + assert "evalue" in reply + + +def test_thread_counts(): + with new_kernel() as kc: + nthreads = execute_thread_count(kc) + + subshell_id = create_subshell_helper(kc)["subshell_id"] + nthreads2 = execute_thread_count(kc) + assert nthreads2 > nthreads + + delete_subshell_helper(kc, subshell_id) + nthreads3 = execute_thread_count(kc) + assert nthreads3 == nthreads + + +def test_thread_ids(): + with new_kernel() as kc: + subshell_id = create_subshell_helper(kc)["subshell_id"] + + thread_id, main_thread_id = execute_thread_ids(kc) + assert thread_id == main_thread_id + + thread_id, main_thread_id = execute_thread_ids(kc, subshell_id) + assert thread_id != main_thread_id + + delete_subshell_helper(kc, subshell_id) + + +@pytest.mark.parametrize("are_subshells", [(False, True), (True, False), (True, True)]) +@pytest.mark.parametrize("overlap", [True, False]) +def test_run_concurrently_sequence(are_subshells, overlap): + with new_kernel() as kc: + subshell_ids = [ + create_subshell_helper(kc)["subshell_id"] if is_subshell else None + for is_subshell in are_subshells + ] 
+ if overlap: + codes = [ + "import time; start0=True; end0=False; time.sleep(0.2); end0=True", + "assert start0; assert not end0; time.sleep(0.2); assert end0", + ] + else: + codes = [ + "import time; start0=True; end0=False; time.sleep(0.2); assert end1", + "assert start0; assert not end0; end1=True", + ] + + msgs = [] + for subshell_id, code in zip(subshell_ids, codes): + msg = kc.session.msg("execute_request", {"code": code}) + msg["header"]["subshell_id"] = subshell_id + kc.shell_channel.send(msg) + msgs.append(msg) + if len(msgs) == 1: + time.sleep(0.1) # Wait for first execute_request to start. + + replies = get_replies(kc, [msg["msg_id"] for msg in msgs]) + + for subshell_id in subshell_ids: + if subshell_id: + delete_subshell_helper(kc, subshell_id) + + for reply in replies: + assert reply["content"]["status"] == "ok" + + +@pytest.mark.parametrize("include_main_shell", [True, False]) +def test_run_concurrently_timing(include_main_shell): + with new_kernel() as kc: + subshell_ids = [ + None if include_main_shell else create_subshell_helper(kc)["subshell_id"], + create_subshell_helper(kc)["subshell_id"], + ] + + times = (0.2, 0.2) + # Prepare messages, times are sleep times in seconds. + # Identical times for both subshells is a harder test as preparing and sending + # the execute_reply messages may overlap. 
+ msgs = [] + for id, sleep in zip(subshell_ids, times): + code = f"import time; time.sleep({sleep})" + msg = kc.session.msg("execute_request", {"code": code}) + msg["header"]["subshell_id"] = id + msgs.append(msg) + + # Send messages + start = datetime.now() + for msg in msgs: + kc.shell_channel.send(msg) + + _ = get_replies(kc, [msg["msg_id"] for msg in msgs]) + end = datetime.now() + + for subshell_id in subshell_ids: + if subshell_id: + delete_subshell_helper(kc, subshell_id) + + duration = end - start + assert duration >= timedelta(seconds=max(times)) + # Care is needed with this test as runtime conditions such as gathering + # coverage can slow it down causing the following assert to fail. + # The sleep time of 0.2 is empirically determined to run OK in CI, but + # consider increasing it if the following fails. + assert duration < timedelta(seconds=sum(times)) + + +def test_execution_count(): + with new_kernel() as kc: + subshell_id = create_subshell_helper(kc)["subshell_id"] + + # Prepare messages + times = (0.2, 0.1, 0.4, 0.15) # Sleep seconds + msgs = [] + for id, sleep in zip((None, subshell_id, None, subshell_id), times): + code = f"import time; time.sleep({sleep})" + msg = kc.session.msg("execute_request", {"code": code}) + msg["header"]["subshell_id"] = id + msgs.append(msg) + + for msg in msgs: + kc.shell_channel.send(msg) + + # Wait for replies, may be in any order. + replies = get_replies(kc, [msg["msg_id"] for msg in msgs]) + + delete_subshell_helper(kc, subshell_id) + + execution_counts = [r["content"]["execution_count"] for r in replies] + ec = execution_counts[0] + assert execution_counts == [ec, ec - 1, ec + 2, ec + 1] + + +def test_create_while_execute(): + with new_kernel() as kc: + # Send request to execute code on main subshell. + msg = kc.session.msg("execute_request", {"code": "import time; time.sleep(0.05)"}) + kc.shell_channel.send(msg) + + # Create subshell via control channel. 
+ control_msg = kc.session.msg("create_subshell_request") + kc.control_channel.send(control_msg) + control_reply = get_reply(kc, control_msg["header"]["msg_id"], TIMEOUT, channel="control") + subshell_id = control_reply["content"]["subshell_id"] + control_date = control_reply["header"]["date"] + + # Get result message from main subshell. + shell_date = get_reply(kc, msg["msg_id"])["header"]["date"] + + delete_subshell_helper(kc, subshell_id) + + assert control_date < shell_date + + +@pytest.mark.skipif( + platform.python_implementation() == "PyPy", + reason="does not work on PyPy", +) +def test_shutdown_with_subshell(): + # Based on test_kernel.py::test_shutdown + with new_kernel() as kc: + km = kc.parent + subshell_id = create_subshell_helper(kc)["subshell_id"] + assert list_subshell_helper(kc)["subshell_id"] == [subshell_id] + kc.shutdown() + for _ in range(100): # 10 s timeout + if km.is_alive(): + time.sleep(0.1) + else: + break + assert not km.is_alive() diff --git a/tests/utils.py b/tests/utils.py index b1b4119f0..b20e8fcb2 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -2,6 +2,7 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. +from __future__ import annotations import atexit import os @@ -68,6 +69,28 @@ def get_reply(kc, msg_id, timeout=TIMEOUT, channel="shell"): return reply +def get_replies(kc, msg_ids: list[str], timeout=TIMEOUT, channel="shell"): + # Get replies which may arrive in any order as they may be running on different subshells. + # Replies are returned in the same order as the msg_ids, not in the order of arrival. 
+ t0 = time() + count = 0 + replies = [None] * len(msg_ids) + while count < len(msg_ids): + get_msg = getattr(kc, f"get_{channel}_msg") + reply = get_msg(timeout=timeout) + try: + msg_id = reply["parent_header"]["msg_id"] + replies[msg_ids.index(msg_id)] = reply + count += 1 + except ValueError: + # Allow debugging ignored replies + print(f"Ignoring reply not to any of {msg_ids}: {reply}") + t1 = time() + timeout -= t1 - t0 + t0 = t1 + return replies + + def execute(code="", kc=None, **kwargs): """wrapper for doing common steps for validating an execution request""" from .test_message_spec import validate_message From 314cc49da6e7d69d74f4741d4ea6568e926d1819 Mon Sep 17 00:00:00 2001 From: bluss Date: Tue, 15 Oct 2024 11:25:38 +0200 Subject: [PATCH 22/97] Detect parent change in more cases on unix (#1271) --- ipykernel/kernelapp.py | 2 +- ipykernel/parentpoller.py | 28 +++++++++++++++++++++++++--- tests/test_parentpoller.py | 18 +++++++++++++++++- 3 files changed, 43 insertions(+), 5 deletions(-) diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 2f462af4c..394f52a47 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -220,7 +220,7 @@ def init_poller(self): # PID 1 (init) is special and will never go away, # only be reassigned. # Parent polling doesn't work if ppid == 1 to start with. - self.poller = ParentPollerUnix() + self.poller = ParentPollerUnix(parent_pid=self.parent_handle) def _try_bind_socket(self, s, port): iface = f"{self.transport}://{self.ip}" diff --git a/ipykernel/parentpoller.py b/ipykernel/parentpoller.py index a6d9c7538..895a785c7 100644 --- a/ipykernel/parentpoller.py +++ b/ipykernel/parentpoller.py @@ -22,9 +22,17 @@ class ParentPollerUnix(Thread): when the parent process no longer exists. """ - def __init__(self): - """Initialize the poller.""" + def __init__(self, parent_pid=0): + """Initialize the poller. 
+ + Parameters + ---------- + parent_handle : int, optional + If provided, the program will terminate immediately when + process parent is no longer this original parent. + """ super().__init__() + self.parent_pid = parent_pid self.daemon = True def run(self): @@ -32,9 +40,23 @@ def run(self): # We cannot use os.waitpid because it works only for child processes. from errno import EINTR + # before start, check if the passed-in parent pid is valid + original_ppid = os.getppid() + if original_ppid != self.parent_pid: + self.parent_pid = 0 + + get_logger().debug( + "%s: poll for parent change with original parent pid=%d", + type(self).__name__, + self.parent_pid, + ) + while True: try: - if os.getppid() == 1: + ppid = os.getppid() + parent_is_init = not self.parent_pid and ppid == 1 + parent_has_changed = self.parent_pid and ppid != self.parent_pid + if parent_is_init or parent_has_changed: get_logger().warning("Parent appears to have exited, shutting down.") os._exit(1) time.sleep(1.0) diff --git a/tests/test_parentpoller.py b/tests/test_parentpoller.py index 97cd80440..716c9e8f5 100644 --- a/tests/test_parentpoller.py +++ b/tests/test_parentpoller.py @@ -9,7 +9,7 @@ @pytest.mark.skipif(os.name == "nt", reason="only works on posix") -def test_parent_poller_unix(): +def test_parent_poller_unix_to_pid1(): poller = ParentPollerUnix() with mock.patch("os.getppid", lambda: 1): # noqa: PT008 @@ -27,6 +27,22 @@ def mock_getppid(): poller.run() +@pytest.mark.skipif(os.name == "nt", reason="only works on posix") +def test_parent_poller_unix_reparent_not_pid1(): + parent_pid = 221 + parent_pids = iter([parent_pid, parent_pid - 1]) + + poller = ParentPollerUnix(parent_pid=parent_pid) + + with mock.patch("os.getppid", lambda: next(parent_pids)): # noqa: PT008 + + def exit_mock(*args): + sys.exit(1) + + with mock.patch("os._exit", exit_mock), pytest.raises(SystemExit): + poller.run() + + @pytest.mark.skipif(os.name != "nt", reason="only works on windows") def 
test_parent_poller_windows(): poller = ParentPollerWindows(interrupt_handle=1) From de2c4984f007b912da73c15ca237e7db741c3145 Mon Sep 17 00:00:00 2001 From: Carreau Date: Tue, 22 Oct 2024 08:26:13 +0000 Subject: [PATCH 23/97] Publish 7.0.0a0 SHA256 hashes: ipykernel-7.0.0a0-py3-none-any.whl: d5c69fa7dc3461c07e314e971c7421e8478936f1179e8fd48701d4aa633f4f7a ipykernel-7.0.0a0.tar.gz: 143ed530d3930f55246ee3d0b50060962ff1270c271185d7b7832287445f3ecc --- CHANGELOG.md | 51 +++++++++++++++++++++++++++++++++++++++---- ipykernel/_version.py | 2 +- 2 files changed, 48 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a4a8d0a0c..09342c320 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,53 @@ +## 7.0.0a0 + +([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.29.3...314cc49da6e7d69d74f4741d4ea6568e926d1819)) + +### Enhancements made + +- Detect parent change in more cases on unix [#1271](https://github.com/ipython/ipykernel/pull/1271) ([@bluss](https://github.com/bluss)) +- Kernel subshells (JEP91) implementation [#1249](https://github.com/ipython/ipykernel/pull/1249) ([@ianthomas23](https://github.com/ianthomas23)) +- Remove control queue [#1210](https://github.com/ipython/ipykernel/pull/1210) ([@ianthomas23](https://github.com/ianthomas23)) +- Replace Tornado with AnyIO [#1079](https://github.com/ipython/ipykernel/pull/1079) ([@davidbrochart](https://github.com/davidbrochart)) + +### Bugs fixed + +- Fix eventloop integration with anyio [#1265](https://github.com/ipython/ipykernel/pull/1265) ([@ianthomas23](https://github.com/ianthomas23)) +- Explicitly close memory object streams [#1253](https://github.com/ipython/ipykernel/pull/1253) ([@ianthomas23](https://github.com/ianthomas23)) +- Fixed error accessing sys.stdout/sys.stderr when those are None [#1247](https://github.com/ipython/ipykernel/pull/1247) ([@gregory-shklover](https://github.com/gregory-shklover)) +- Correctly handle with_cell_id in async do_execute 
[#1227](https://github.com/ipython/ipykernel/pull/1227) ([@ianthomas23](https://github.com/ianthomas23)) +- Do not import debugger/debugpy unless needed [#1223](https://github.com/ipython/ipykernel/pull/1223) ([@krassowski](https://github.com/krassowski)) +- Allow datetime or str in test_sequential_control_messages [#1219](https://github.com/ipython/ipykernel/pull/1219) ([@ianthomas23](https://github.com/ianthomas23)) +- Fix side effect import for pickleutil [#1217](https://github.com/ipython/ipykernel/pull/1217) ([@blink1073](https://github.com/blink1073)) + +### Maintenance and upkeep improvements + +- Remove direct use of asyncio [#1266](https://github.com/ipython/ipykernel/pull/1266) ([@davidbrochart](https://github.com/davidbrochart)) +- Specify argtypes when using macos msg [#1264](https://github.com/ipython/ipykernel/pull/1264) ([@ianthomas23](https://github.com/ianthomas23)) +- Forward port changelog for 6.29.4 and 5 to main branch [#1263](https://github.com/ipython/ipykernel/pull/1263) ([@ianthomas23](https://github.com/ianthomas23)) +- Ignore warning from trio [#1262](https://github.com/ipython/ipykernel/pull/1262) ([@ianthomas23](https://github.com/ianthomas23)) +- Build docs on ubuntu [#1257](https://github.com/ipython/ipykernel/pull/1257) ([@blink1073](https://github.com/blink1073)) +- Avoid a DeprecationWarning on Python 3.13+ [#1248](https://github.com/ipython/ipykernel/pull/1248) ([@hroncok](https://github.com/hroncok)) +- Catch IPython 8.24 DeprecationWarnings [#1242](https://github.com/ipython/ipykernel/pull/1242) ([@s-t-e-v-e-n-k](https://github.com/s-t-e-v-e-n-k)) +- Update version to 7.0.0 [#1241](https://github.com/ipython/ipykernel/pull/1241) ([@mlucool](https://github.com/mlucool)) +- Add compat with pytest 8 [#1231](https://github.com/ipython/ipykernel/pull/1231) ([@blink1073](https://github.com/blink1073)) +- Set all min deps [#1229](https://github.com/ipython/ipykernel/pull/1229) ([@blink1073](https://github.com/blink1073)) +- Update 
Release Scripts [#1221](https://github.com/ipython/ipykernel/pull/1221) ([@blink1073](https://github.com/blink1073)) + +### Documentation improvements + +- Forward port changelog for 6.29.4 and 5 to main branch [#1263](https://github.com/ipython/ipykernel/pull/1263) ([@ianthomas23](https://github.com/ianthomas23)) + +### Contributors to this release + +([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2024-02-26&to=2024-10-22&type=c)) + +[@agronholm](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aagronholm+updated%3A2024-02-26..2024-10-22&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2024-02-26..2024-10-22&type=Issues) | [@bluss](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Abluss+updated%3A2024-02-26..2024-10-22&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ACarreau+updated%3A2024-02-26..2024-10-22&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Adavidbrochart+updated%3A2024-02-26..2024-10-22&type=Issues) | [@gregory-shklover](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Agregory-shklover+updated%3A2024-02-26..2024-10-22&type=Issues) | [@hroncok](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ahroncok+updated%3A2024-02-26..2024-10-22&type=Issues) | [@ianthomas23](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aianthomas23+updated%3A2024-02-26..2024-10-22&type=Issues) | [@ivanov](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aivanov+updated%3A2024-02-26..2024-10-22&type=Issues) | [@krassowski](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Akrassowski+updated%3A2024-02-26..2024-10-22&type=Issues) | 
[@maartenbreddels](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Amaartenbreddels+updated%3A2024-02-26..2024-10-22&type=Issues) | [@minrk](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aminrk+updated%3A2024-02-26..2024-10-22&type=Issues) | [@mlucool](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Amlucool+updated%3A2024-02-26..2024-10-22&type=Issues) | [@s-t-e-v-e-n-k](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3As-t-e-v-e-n-k+updated%3A2024-02-26..2024-10-22&type=Issues) + + + ## 6.29.5 ([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.29.4...1e62d48298e353a9879fae99bc752f9bb48797ef)) @@ -20,8 +67,6 @@ [@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2024-03-27..2024-06-29&type=Issues) | [@ianthomas23](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aianthomas23+updated%3A2024-03-27..2024-06-29&type=Issues) - - ## 6.29.4 ([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.29.3...1cea5332ffc37f32e8232fd2b8b8ddd91b2bbdcf)) @@ -67,8 +112,6 @@ [@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2024-02-07..2024-02-26&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Accordoba12+updated%3A2024-02-07..2024-02-26&type=Issues) | [@jdranczewski](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ajdranczewski+updated%3A2024-02-07..2024-02-26&type=Issues) | [@joouha](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ajoouha+updated%3A2024-02-07..2024-02-26&type=Issues) | [@krassowski](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Akrassowski+updated%3A2024-02-07..2024-02-26&type=Issues) - - ## 6.29.2 ([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.29.1...d45fe71990d26c0bd5b7b3b2a4ccd3d1f6609899)) diff --git a/ipykernel/_version.py 
b/ipykernel/_version.py index 8a7bc44eb..d9a9f5346 100644 --- a/ipykernel/_version.py +++ b/ipykernel/_version.py @@ -5,7 +5,7 @@ from typing import List # Version string must appear intact for hatch versioning -__version__ = "7.0.0" +__version__ = "7.0.0a0" # Build up version_info tuple for backwards compatibility pattern = r"(?P\d+).(?P\d+).(?P\d+)(?P.*)" From a96c9feb08154893da66946e2903bacf949496a9 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Tue, 22 Oct 2024 02:27:03 -0700 Subject: [PATCH 24/97] Try to add workflow to publish nightlies (#1276) --- .github/workflows/nightly.yml | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 .github/workflows/nightly.yml diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml new file mode 100644 index 000000000..a60be2b40 --- /dev/null +++ b/.github/workflows/nightly.yml @@ -0,0 +1,32 @@ +name: nightly build and upload +on: + workflow_dispatch: + schedule: + - cron: "0 0 * * *" + +defaults: + run: + shell: bash -eux {0} + +jobs: + build: + runs-on: "ubuntu-latest" + strategy: + fail-fast: false + matrix: + python-version: ["3.12"] + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Base Setup + uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 + + - name: Build + run: | + python -m build + - name: Upload wheel + uses: scientific-python/upload-nightly-action@82396a2ed4269ba06c6b2988bb4fd568ef3c3d6b # 0.6.1 + with: + artifacts_path: dist + anaconda_nightly_upload_token: ${{secrets.UPLOAD_TOKEN}} From a991039a2e25493bf11744688aedb490e00719b1 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Tue, 22 Oct 2024 11:29:17 +0200 Subject: [PATCH 25/97] install build --- .github/workflows/nightly.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index a60be2b40..499f43562 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -24,6 +24,7 @@ jobs: - 
name: Build run: | + python -m pip install build python -m build - name: Upload wheel uses: scientific-python/upload-nightly-action@82396a2ed4269ba06c6b2988bb4fd568ef3c3d6b # 0.6.1 From e3ae39c08422d0351c39ae1622638146b9a3737f Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Tue, 22 Oct 2024 06:01:30 -0700 Subject: [PATCH 26/97] Remove some potential dead-code. (#1273) --- examples/embedding/inprocess_qtconsole.py | 34 ++--------------------- ipykernel/kernelapp.py | 2 +- tests/test_eventloop.py | 22 +-------------- tests/test_kernel.py | 4 +-- 4 files changed, 6 insertions(+), 56 deletions(-) diff --git a/examples/embedding/inprocess_qtconsole.py b/examples/embedding/inprocess_qtconsole.py index 18ce28638..7a976a319 100644 --- a/examples/embedding/inprocess_qtconsole.py +++ b/examples/embedding/inprocess_qtconsole.py @@ -1,54 +1,24 @@ """An in-process qt console app.""" import os -import sys import tornado from IPython.lib import guisupport from qtconsole.inprocess import QtInProcessKernelManager from qtconsole.rich_ipython_widget import RichIPythonWidget +assert tornado.version_info >= (6, 1) + def print_process_id(): """Print the process id.""" print("Process ID is:", os.getpid()) -def init_asyncio_patch(): - """set default asyncio policy to be compatible with tornado - Tornado 6 (at least) is not compatible with the default - asyncio implementation on Windows - Pick the older SelectorEventLoopPolicy on Windows - if the known-incompatible default policy is in use. 
- do this as early as possible to make it a low priority and overridable - ref: https://github.com/tornadoweb/tornado/issues/2608 - FIXME: if/when tornado supports the defaults in asyncio, - remove and bump tornado requirement for py38 - """ - if ( - sys.platform.startswith("win") - and sys.version_info >= (3, 8) - and tornado.version_info < (6, 1) - ): - import asyncio - - try: - from asyncio import WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy - except ImportError: - pass - # not affected - else: - if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: - # WindowsProactorEventLoopPolicy is not compatible with tornado 6 - # fallback to the pre-3.8 default of Selector - asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) - - def main(): """The main entry point.""" # Print the ID of the main process print_process_id() - init_asyncio_patch() app = guisupport.get_app_qt4() # Create an in-process kernel diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 394f52a47..55efaa8e4 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -657,7 +657,7 @@ def _init_asyncio_patch(self): where asyncio.ProactorEventLoop supports add_reader and friends. 
""" - if sys.platform.startswith("win") and sys.version_info >= (3, 8): + if sys.platform.startswith("win"): import asyncio try: diff --git a/tests/test_eventloop.py b/tests/test_eventloop.py index 77596eedd..62a7f8ba3 100644 --- a/tests/test_eventloop.py +++ b/tests/test_eventloop.py @@ -7,7 +7,6 @@ import time import pytest -import tornado from ipykernel.eventloops import ( enable_gui, @@ -16,7 +15,7 @@ loop_tk, ) -from .utils import execute, flush_channels, start_new_kernel +from .utils import flush_channels, start_new_kernel KC = KM = None @@ -61,25 +60,6 @@ def _setup_env(): """ -@pytest.mark.skipif(tornado.version_info < (5,), reason="only relevant on tornado 5") -def test_asyncio_interrupt(): - assert KM is not None - assert KC is not None - flush_channels(KC) - msg_id, content = execute("%gui asyncio", KC) - assert content["status"] == "ok", content - - flush_channels(KC) - msg_id, content = execute(async_code, KC) - assert content["status"] == "ok", content - - KM.interrupt_kernel() - - flush_channels(KC) - msg_id, content = execute(async_code, KC) - assert content["status"] == "ok" - - windows_skip = pytest.mark.skipif(os.name == "nt", reason="causing failures on windows") diff --git a/tests/test_kernel.py b/tests/test_kernel.py index 88f02ae9a..89d5e390b 100644 --- a/tests/test_kernel.py +++ b/tests/test_kernel.py @@ -212,7 +212,7 @@ def test_sys_path_profile_dir(): @flaky(max_runs=3) @pytest.mark.skipif( - sys.platform == "win32" or (sys.platform == "darwin" and sys.version_info >= (3, 8)), + sys.platform == "win32" or (sys.platform == "darwin"), reason="subprocess prints fail on Windows and MacOS Python 3.8+", ) def test_subprocess_print(): @@ -267,7 +267,7 @@ def test_subprocess_noprint(): @flaky(max_runs=3) @pytest.mark.skipif( - sys.platform == "win32" or (sys.platform == "darwin" and sys.version_info >= (3, 8)), + (sys.platform == "win32") or (sys.platform == "darwin"), reason="subprocess prints fail on Windows and MacOS Python 3.8+", ) def 
test_subprocess_error(): From 1800bb802bc243fa2a6ea88b3568fc5c43543ee2 Mon Sep 17 00:00:00 2001 From: Min RK Date: Tue, 22 Oct 2024 14:49:37 +0200 Subject: [PATCH 27/97] remove deprecated ipyparallel methods no version of ipython parallel without these methods will work with ipykernel 7 anyway --- ipykernel/kernelbase.py | 104 ++++------------------------------------ 1 file changed, 10 insertions(+), 94 deletions(-) diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 99358f9b6..4dcb5877c 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -48,7 +48,6 @@ Instance, Integer, List, - Set, Unicode, default, ) @@ -199,9 +198,6 @@ def _parent_header(self): # by record_ports and used by connect_request. _recorded_ports = Dict() - # set of aborted msg_ids - aborted = Set() - # Track execution count here. For IPython, we override this to use the # execution count we store in the shell. execution_count = 0 @@ -217,14 +213,10 @@ def _parent_header(self): "shutdown_request", "is_complete_request", "interrupt_request", - # deprecated: - "apply_request", ] # add deprecated ipyparallel control messages control_msg_types = [ *msg_types, - "clear_request", - "abort_request", "debug_request", "usage_request", "create_subshell_request", @@ -308,17 +300,15 @@ async def process_control_message(self, msg=None): sys.stderr.flush() self._publish_status("idle", "control") - async def should_handle(self, stream, msg, idents): + def should_handle(self, stream, msg, idents): """Check whether a shell-channel message should be handled Allows subclasses to prevent handling of certain messages (e.g. aborted requests). + + .. versionchanged:: 7 + Subclass should_handle _may_ be async. + Base class implementation is not async. """ - msg_id = msg["header"]["msg_id"] - if msg_id in self.aborted: - # is it safe to assume a msg_id will not be resubmitted? 
- self.aborted.remove(msg_id) - await self._send_abort_reply(stream, msg, idents) - return False return True async def enter_eventloop(self): @@ -483,7 +473,11 @@ async def process_shell_message(self, msg=None, socket=None): self.log.debug("\n*** MESSAGE TYPE:%s***", msg_type) self.log.debug(" Content: %s\n --->\n ", msg["content"]) - if not await self.should_handle(socket, msg, idents): + should_handle: bool | t.Awaitable[bool] = self.should_handle(socket, msg, idents) + if inspect.isawaitable(should_handle): + should_handle = await should_handle + if not should_handle: + self.log.debug("Not handling %s:%s", msg_type, msg["header"].get("msg_id")) return handler = self.shell_handlers.get(msg_type) @@ -1126,84 +1120,6 @@ async def list_subshell_request(self, socket, ident, parent) -> None: self.session.send(socket, "list_subshell_reply", reply, parent, ident) - # --------------------------------------------------------------------------- - # Engine methods (DEPRECATED) - # --------------------------------------------------------------------------- - - async def apply_request(self, socket, ident, parent): # pragma: no cover - """Handle an apply request.""" - self.log.warning("apply_request is deprecated in kernel_base, moving to ipyparallel.") - try: - content = parent["content"] - bufs = parent["buffers"] - msg_id = parent["header"]["msg_id"] - except Exception: - self.log.error("Got bad msg: %s", parent, exc_info=True) # noqa: G201 - return - - md = self.init_metadata(parent) - - reply_content, result_buf = self.do_apply(content, bufs, msg_id, md) - - # flush i/o - if sys.stdout is not None: - sys.stdout.flush() - if sys.stderr is not None: - sys.stderr.flush() - - md = self.finish_metadata(parent, md, reply_content) - if not self.session: - return - self.session.send( - socket, - "apply_reply", - reply_content, - parent=parent, - ident=ident, - buffers=result_buf, - metadata=md, - ) - - def do_apply(self, content, bufs, msg_id, reply_metadata): - """DEPRECATED""" 
- raise NotImplementedError - - # --------------------------------------------------------------------------- - # Control messages (DEPRECATED) - # --------------------------------------------------------------------------- - - async def abort_request(self, socket, ident, parent): # pragma: no cover - """abort a specific msg by id""" - self.log.warning( - "abort_request is deprecated in kernel_base. It is only part of IPython parallel" - ) - msg_ids = parent["content"].get("msg_ids", None) - if isinstance(msg_ids, str): - msg_ids = [msg_ids] - for mid in msg_ids: - self.aborted.add(str(mid)) - - content = dict(status="ok") - if not self.session: - return - reply_msg = self.session.send( - socket, "abort_reply", content=content, parent=parent, ident=ident - ) - self.log.debug("%s", reply_msg) - - async def clear_request(self, socket, idents, parent): # pragma: no cover - """Clear our namespace.""" - self.log.warning( - "clear_request is deprecated in kernel_base. It is only part of IPython parallel" - ) - content = self.do_clear() - if self.session: - self.session.send(socket, "clear_reply", ident=idents, parent=parent, content=content) - - def do_clear(self): - """DEPRECATED since 4.0.3""" - raise NotImplementedError - # --------------------------------------------------------------------------- # Protected interface # --------------------------------------------------------------------------- From 93fabd2aacf4d3284d421cf9813ca51329007abf Mon Sep 17 00:00:00 2001 From: Min RK Date: Tue, 22 Oct 2024 15:18:15 +0200 Subject: [PATCH 28/97] update test_should_handle --- tests/test_kernel_direct.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/test_kernel_direct.py b/tests/test_kernel_direct.py index 50801b033..ab62404b2 100644 --- a/tests/test_kernel_direct.py +++ b/tests/test_kernel_direct.py @@ -129,10 +129,9 @@ async def test_process_control(kernel): await kernel.process_control_message(msg) -async def test_should_handle(kernel): +def 
test_should_handle(kernel): msg = kernel.session.msg("debug_request", {}) - kernel.aborted.add(msg["header"]["msg_id"]) - assert not await kernel.should_handle(kernel.control_socket, msg, []) + assert kernel.should_handle(kernel.control_socket, msg, []) is True async def test_dispatch_shell(kernel): From 2ca799276f4458d50085bb1ef8e2ddeb08a283a6 Mon Sep 17 00:00:00 2001 From: Min RK Date: Tue, 22 Oct 2024 15:37:24 +0200 Subject: [PATCH 29/97] socket must be None, not shell_socket for default shell (#1281) --- ipykernel/kernelbase.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 99358f9b6..c6b18eb70 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -412,7 +412,7 @@ async def shell_main(self, subshell_id: str | None): else: assert subshell_id is None assert threading.current_thread() == threading.main_thread() - socket = self.shell_socket + socket = None async with create_task_group() as tg: tg.start_soon(self.process_shell, socket) From a60c86400ccb7346cf9e92c2ef7d55df961d5820 Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Fri, 25 Oct 2024 12:14:10 +0100 Subject: [PATCH 30/97] Drop support for Python 3.8 (#1284) --- .github/workflows/ci.yml | 4 +--- pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 233aabbda..c86e5cf42 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,10 +22,8 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, windows-latest, macos-latest] - python-version: ["3.8", "3.12"] + python-version: ["3.9", "3.12"] include: - - os: windows-latest - python-version: "3.9" - os: ubuntu-latest python-version: "pypy-3.9" - os: macos-latest diff --git a/pyproject.toml b/pyproject.toml index aeaeef463..1e9e543c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ classifiers = [ "Programming Language :: Python", "Programming Language :: 
Python :: 3", ] -requires-python = ">=3.8" +requires-python = ">=3.9" dependencies = [ "debugpy>=1.8.1", "ipython>=7.23.1", From 41a965eb35b3364a56e5f1e827d7e3a9580c0213 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Fri, 25 Oct 2024 05:48:16 -0700 Subject: [PATCH 31/97] start testing on 3.13 (#1277) Co-authored-by: Min RK --- .github/workflows/ci.yml | 5 ++++- docs/conf.py | 8 ++++---- ipykernel/_version.py | 3 +-- ipykernel/debugger.py | 2 +- ipykernel/inprocess/channels.py | 4 +--- ipykernel/iostream.py | 8 ++++---- ipykernel/ipkernel.py | 18 ++++++++++++------ ipykernel/pickleutil.py | 4 ++-- ipykernel/thread.py | 2 +- pyproject.toml | 3 +++ 10 files changed, 33 insertions(+), 24 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index c86e5cf42..70b3bf405 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,7 +22,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, windows-latest, macos-latest] - python-version: ["3.9", "3.12"] + python-version: ["3.9", "3.13"] include: - os: ubuntu-latest python-version: "pypy-3.9" @@ -30,6 +30,8 @@ jobs: python-version: "3.10" - os: ubuntu-latest python-version: "3.11" + - os: ubuntu-latest + python-version: "3.12" steps: - name: Checkout uses: actions/checkout@v4 @@ -148,6 +150,7 @@ jobs: uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 with: dependency_type: minimum + python_version: "3.9" - name: List installed packages run: | diff --git a/docs/conf.py b/docs/conf.py index 4bb599327..38a724b52 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -13,7 +13,7 @@ import os import shutil from pathlib import Path -from typing import Any, Dict, List +from typing import Any # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the @@ -70,7 +70,7 @@ # built documents. 
# -version_ns: Dict[str, Any] = {} +version_ns: dict[str, Any] = {} here = Path(__file__).parent.resolve() version_py = Path(here) / os.pardir / "ipykernel" / "_version.py" with open(version_py) as f: @@ -159,7 +159,7 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path: List[str] = [] +html_static_path: list[str] = [] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied @@ -226,7 +226,7 @@ # -- Options for LaTeX output --------------------------------------------- -latex_elements: Dict[str, object] = {} +latex_elements: dict[str, object] = {} # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, diff --git a/ipykernel/_version.py b/ipykernel/_version.py index d9a9f5346..5907d150c 100644 --- a/ipykernel/_version.py +++ b/ipykernel/_version.py @@ -2,7 +2,6 @@ store the current version info of the server. 
""" import re -from typing import List # Version string must appear intact for hatch versioning __version__ = "7.0.0a0" @@ -11,7 +10,7 @@ pattern = r"(?P\d+).(?P\d+).(?P\d+)(?P.*)" match = re.match(pattern, __version__) assert match is not None -parts: List[object] = [int(match[part]) for part in ["major", "minor", "patch"]] +parts: list[object] = [int(match[part]) for part in ["major", "minor", "patch"]] if match["rest"]: parts.append(match["rest"]) version_info = tuple(parts) diff --git a/ipykernel/debugger.py b/ipykernel/debugger.py index 8680793fd..780d18015 100644 --- a/ipykernel/debugger.py +++ b/ipykernel/debugger.py @@ -130,7 +130,7 @@ def _reset_tcp_pos(self): def _put_message(self, raw_msg): self.log.debug("QUEUE - _put_message:") - msg = t.cast(t.Dict[str, t.Any], jsonapi.loads(raw_msg)) + msg = t.cast(dict[str, t.Any], jsonapi.loads(raw_msg)) if msg["type"] == "event": self.log.debug("QUEUE - received event:") self.log.debug(msg) diff --git a/ipykernel/inprocess/channels.py b/ipykernel/inprocess/channels.py index 378416dcc..4c01c5bcb 100644 --- a/ipykernel/inprocess/channels.py +++ b/ipykernel/inprocess/channels.py @@ -3,8 +3,6 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
-from typing import List - from jupyter_client.channelsabc import HBChannelABC # ----------------------------------------------------------------------------- @@ -15,7 +13,7 @@ class InProcessChannel: """Base class for in-process channels.""" - proxy_methods: List[object] = [] + proxy_methods: list[object] = [] def __init__(self, client=None): """Initialize the channel.""" diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index beca44b16..81170b97f 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -15,7 +15,7 @@ from collections import defaultdict, deque from io import StringIO, TextIOBase from threading import Event, Thread, local -from typing import Any, Callable, Deque, Dict, Optional +from typing import Any, Callable, Optional import zmq from anyio import create_task_group, run, sleep, to_thread @@ -94,8 +94,8 @@ def __init__(self, socket, pipe=False): if pipe: self._setup_pipe_in() self._local = threading.local() - self._events: Deque[Callable[..., Any]] = deque() - self._event_pipes: Dict[threading.Thread, Any] = {} + self._events: deque[Callable[..., Any]] = deque() + self._event_pipes: dict[threading.Thread, Any] = {} self._event_pipe_gc_lock: threading.Lock = threading.Lock() self._event_pipe_gc_seconds: float = 10 self._setup_event_pipe() @@ -470,7 +470,7 @@ def __init__( self.pub_thread = pub_thread self.name = name self.topic = b"stream." 
+ name.encode() - self._parent_header: contextvars.ContextVar[Dict[str, Any]] = contextvars.ContextVar( + self._parent_header: contextvars.ContextVar[dict[str, Any]] = contextvars.ContextVar( "parent_header" ) self._parent_header.set({}) diff --git a/ipykernel/ipkernel.py b/ipykernel/ipkernel.py index db83d986f..48efa6cd6 100644 --- a/ipykernel/ipkernel.py +++ b/ipykernel/ipkernel.py @@ -1,5 +1,7 @@ """The IPython kernel implementation""" +from __future__ import annotations + import builtins import gc import getpass @@ -16,7 +18,7 @@ from IPython.core import release from IPython.utils.tokenutil import line_at_cursor, token_at_cursor from jupyter_client.session import extract_header -from traitlets import Any, Bool, HasTraits, Instance, List, Type, observe, observe_compat +from traitlets import Any, Bool, HasTraits, Instance, List, Type, default, observe, observe_compat from .comm.comm import BaseComm from .comm.manager import CommManager @@ -46,7 +48,7 @@ def _create_comm(*args, **kwargs): # there can only be one comm manager in a ipykernel process _comm_lock = threading.Lock() -_comm_manager: t.Optional[CommManager] = None +_comm_manager: CommManager | None = None def _get_comm_manager(*args, **kwargs): @@ -84,7 +86,11 @@ def _user_module_changed(self, change): if self.shell is not None: self.shell.user_module = change["new"] - user_ns = Instance(dict, args=None, allow_none=True) + user_ns = Instance("collections.abc.Mapping", allow_none=True) + + @default("user_ns") + def _default_user_ns(self): + return dict() @observe("user_ns") @observe_compat @@ -353,7 +359,7 @@ async def do_execute( self._forward_input(allow_stdin) - reply_content: t.Dict[str, t.Any] = {} + reply_content: dict[str, t.Any] = {} if hasattr(shell, "run_cell_async") and hasattr(shell, "should_run_async"): run_cell = shell.run_cell_async should_run_async = shell.should_run_async @@ -559,7 +565,7 @@ def do_inspect(self, code, cursor_pos, detail_level=0, omit_sections=()): """Handle code 
inspection.""" name = token_at_cursor(code, cursor_pos) - reply_content: t.Dict[str, t.Any] = {"status": "ok"} + reply_content: dict[str, t.Any] = {"status": "ok"} reply_content["data"] = {} reply_content["metadata"] = {} assert self.shell is not None @@ -755,7 +761,7 @@ def init_closure(self: threading.Thread, *args, **kwargs): threading.Thread.run = run_closure # type:ignore[method-assign] def _clean_thread_parent_frames( - self, phase: t.Literal["start", "stop"], info: t.Dict[str, t.Any] + self, phase: t.Literal["start", "stop"], info: dict[str, t.Any] ): """Clean parent frames of threads which are no longer running. This is meant to be invoked by garbage collector callback hook. diff --git a/ipykernel/pickleutil.py b/ipykernel/pickleutil.py index 6f1565943..4ffa5262e 100644 --- a/ipykernel/pickleutil.py +++ b/ipykernel/pickleutil.py @@ -209,7 +209,7 @@ def __init__(self, f): """Initialize the can""" self._check_type(f) self.code = f.__code__ - self.defaults: typing.Optional[typing.List[typing.Any]] + self.defaults: typing.Optional[list[typing.Any]] if f.__defaults__: self.defaults = [can(fd) for fd in f.__defaults__] else: @@ -475,7 +475,7 @@ def uncan_sequence(obj, g=None): if buffer is not memoryview: can_map[buffer] = CannedBuffer -uncan_map: typing.Dict[type, typing.Any] = { +uncan_map: dict[type, typing.Any] = { CannedObject: lambda obj, g: obj.get_object(g), dict: uncan_dict, } diff --git a/ipykernel/thread.py b/ipykernel/thread.py index a63011de7..40509eced 100644 --- a/ipykernel/thread.py +++ b/ipykernel/thread.py @@ -17,7 +17,7 @@ def __init__(self, **kwargs): self.pydev_do_not_trace = True self.is_pydev_daemon_thread = True self.__stop = Event() - self._tasks_and_args: t.List[t.Tuple[t.Any, t.Any]] = [] + self._tasks_and_args: list[tuple[t.Any, t.Any]] = [] def add_task(self, task: t.Any, *args: t.Any) -> None: # May only add tasks before the thread is started. 
diff --git a/pyproject.toml b/pyproject.toml index 1e9e543c2..e1d7b1d5b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -187,6 +187,9 @@ filterwarnings= [ # https://github.com/python-trio/trio/issues/3053 "ignore:The `hash` argument is deprecated in favor of `unsafe_hash` and will be removed in or after August 2025.", + + # ignore unclosed sqlite in traits + "ignore:unclosed database in Date: Fri, 25 Oct 2024 14:49:01 +0200 Subject: [PATCH 32/97] restore zero-copy recv on shell messages (#1280) --- ipykernel/kernelbase.py | 20 +++++++------------- 1 file changed, 7 insertions(+), 13 deletions(-) diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index c6b18eb70..1c623c085 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -274,10 +274,9 @@ async def process_control_message(self, msg=None): assert self.control_thread is None or threading.current_thread() == self.control_thread msg = msg or await self.control_socket.recv_multipart() - copy = not isinstance(msg[0], zmq.Message) - idents, msg = self.session.feed_identities(msg, copy=copy) + idents, msg = self.session.feed_identities(msg) try: - msg = self.session.deserialize(msg, content=True, copy=copy) + msg = self.session.deserialize(msg, content=True) except Exception: self.log.error("Invalid Control Message", exc_info=True) # noqa: G201 return @@ -375,15 +374,12 @@ async def shell_channel_thread_main(self): try: while True: - msg = await self.shell_socket.recv_multipart() - - # Deserialize whole message just to get subshell_id. + msg = await self.shell_socket.recv_multipart(copy=False) + # deserialize only the header to get subshell_id # Keep original message to send to subshell_id unmodified. - # Ideally only want to deserialize message once. 
- copy = not isinstance(msg[0], zmq.Message) - _, msg2 = self.session.feed_identities(msg, copy=copy) + _, msg2 = self.session.feed_identities(msg, copy=False) try: - msg3 = self.session.deserialize(msg2, content=False, copy=copy) + msg3 = self.session.deserialize(msg2, content=False, copy=False) subshell_id = msg3["header"].get("subshell_id") # Find inproc pair socket to use to send message to correct subshell. @@ -1210,9 +1206,7 @@ def do_clear(self): def _topic(self, topic): """prefixed topic for IOPub messages""" - base = "kernel.%s" % self.ident - - return (f"{base}.{topic}").encode() + return (f"kernel.{self.ident}.{topic}").encode() _aborting = Bool(False) From 6299acba8413566107f12f6b346172427362e79f Mon Sep 17 00:00:00 2001 From: Min RK Date: Fri, 25 Oct 2024 15:15:33 +0200 Subject: [PATCH 33/97] update control channel message type comment --- ipykernel/kernelbase.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 4dcb5877c..5acc7424f 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -214,7 +214,9 @@ def _parent_header(self): "is_complete_request", "interrupt_request", ] - # add deprecated ipyparallel control messages + + # control channel accepts all shell messages + # and some of its own control_msg_types = [ *msg_types, "debug_request", From 5ce5beec4212c8b1b5a98ce8533ff3062f6aa239 Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Fri, 25 Oct 2024 14:50:08 +0100 Subject: [PATCH 34/97] Improve robustness of subshell concurrency tests (#1285) --- tests/test_subshells.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/tests/test_subshells.py b/tests/test_subshells.py index f1328ddad..d8d81aeae 100644 --- a/tests/test_subshells.py +++ b/tests/test_subshells.py @@ -133,15 +133,20 @@ def test_run_concurrently_sequence(are_subshells, overlap): create_subshell_helper(kc)["subshell_id"] if is_subshell else None for is_subshell in 
are_subshells ] + + # Import time module before running time-sensitive subshell code. + execute_request_subshell_id(kc, "import time; print('ok')", None) + + sleep = 0.2 if overlap: codes = [ - "import time; start0=True; end0=False; time.sleep(0.2); end0=True", - "assert start0; assert not end0; time.sleep(0.2); assert end0", + f"start0=True; end0=False; time.sleep({sleep}); end0=True", + f"time.sleep({sleep/2}); assert start0; assert not end0; time.sleep({sleep}); assert end0", ] else: codes = [ - "import time; start0=True; end0=False; time.sleep(0.2); assert end1", - "assert start0; assert not end0; end1=True", + f"start0=True; end0=False; time.sleep({sleep}); assert end1", + f"time.sleep({sleep/2}); assert start0; assert not end0; end1=True", ] msgs = [] @@ -150,8 +155,6 @@ def test_run_concurrently_sequence(are_subshells, overlap): msg["header"]["subshell_id"] = subshell_id kc.shell_channel.send(msg) msgs.append(msg) - if len(msgs) == 1: - time.sleep(0.1) # Wait for first execute_request to start. replies = get_replies(kc, [msg["msg_id"] for msg in msgs]) @@ -171,13 +174,16 @@ def test_run_concurrently_timing(include_main_shell): create_subshell_helper(kc)["subshell_id"], ] + # Import time module before running time-sensitive subshell code. + execute_request_subshell_id(kc, "import time; print('ok')", None) + times = (0.2, 0.2) # Prepare messages, times are sleep times in seconds. # Identical times for both subshells is a harder test as preparing and sending # the execute_reply messages may overlap. 
msgs = [] for id, sleep in zip(subshell_ids, times): - code = f"import time; time.sleep({sleep})" + code = f"time.sleep({sleep})" msg = kc.session.msg("execute_request", {"code": code}) msg["header"]["subshell_id"] = id msgs.append(msg) From bf104470d2efe50437523697b736ac66b2c2fc16 Mon Sep 17 00:00:00 2001 From: Min RK Date: Sat, 26 Oct 2024 19:31:46 +0200 Subject: [PATCH 35/97] fix mixture of sync/async sockets in IOPubThread (#1275) --- ipykernel/inprocess/ipkernel.py | 4 +- ipykernel/inprocess/socket.py | 3 ++ ipykernel/iostream.py | 75 +++++++++++++++++++-------------- tests/test_io.py | 4 +- tests/test_kernel.py | 24 +++++------ 5 files changed, 64 insertions(+), 46 deletions(-) diff --git a/ipykernel/inprocess/ipkernel.py b/ipykernel/inprocess/ipkernel.py index 114e231d9..c6f8c6128 100644 --- a/ipykernel/inprocess/ipkernel.py +++ b/ipykernel/inprocess/ipkernel.py @@ -6,6 +6,7 @@ import logging import sys from contextlib import contextmanager +from typing import cast from anyio import TASK_STATUS_IGNORED from anyio.abc import TaskStatus @@ -146,7 +147,8 @@ def callback(msg): assert frontend is not None frontend.iopub_channel.call_handlers(msg) - self.iopub_thread.socket.on_recv = callback + iopub_socket = cast(DummySocket, self.iopub_thread.socket) + iopub_socket.on_recv = callback # ------ Trait initializers ----------------------------------------------- diff --git a/ipykernel/inprocess/socket.py b/ipykernel/inprocess/socket.py index edc77c286..5a2e0008b 100644 --- a/ipykernel/inprocess/socket.py +++ b/ipykernel/inprocess/socket.py @@ -63,3 +63,6 @@ async def poll(self, timeout=0): assert timeout == 0 statistics = self.in_receive_stream.statistics() return statistics.current_buffer_used != 0 + + def close(self): + pass diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index 81170b97f..d81710175 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -3,6 +3,8 @@ # Copyright (c) IPython Development Team. 
# Distributed under the terms of the Modified BSD License. +from __future__ import annotations + import atexit import contextvars import io @@ -15,7 +17,7 @@ from collections import defaultdict, deque from io import StringIO, TextIOBase from threading import Event, Thread, local -from typing import Any, Callable, Optional +from typing import Any, Callable import zmq from anyio import create_task_group, run, sleep, to_thread @@ -25,8 +27,8 @@ # Globals # ----------------------------------------------------------------------------- -MASTER = 0 -CHILD = 1 +_PARENT = 0 +_CHILD = 1 PIPE_BUFFER_SIZE = 1000 @@ -87,9 +89,16 @@ def __init__(self, socket, pipe=False): Whether this process should listen for IOPub messages piped from subprocesses. """ - self.socket = socket + # ensure all of our sockets as sync zmq.Sockets + # don't create async wrappers until we are within the appropriate coroutines + self.socket: zmq.Socket[bytes] | None = zmq.Socket(socket) + if self.socket.context is None: + # bug in pyzmq, shadow socket doesn't always inherit context attribute + self.socket.context = socket.context # type:ignore[unreachable] + self._context = socket.context + self.background_socket = BackgroundSocket(self) - self._master_pid = os.getpid() + self._main_pid = os.getpid() self._pipe_flag = pipe if pipe: self._setup_pipe_in() @@ -106,8 +115,7 @@ def __init__(self, socket, pipe=False): def _setup_event_pipe(self): """Create the PULL socket listening for events that should fire in this thread.""" - ctx = self.socket.context - self._pipe_in0 = ctx.socket(zmq.PULL) + self._pipe_in0 = self._context.socket(zmq.PULL, socket_class=zmq.Socket) self._pipe_in0.linger = 0 _uuid = b2a_hex(os.urandom(16)).decode("ascii") @@ -141,8 +149,8 @@ def _event_pipe(self): event_pipe = self._local.event_pipe except AttributeError: # new thread, new event pipe - ctx = zmq.Context(self.socket.context) - event_pipe = ctx.socket(zmq.PUSH) + # create sync base socket + event_pipe = 
self._context.socket(zmq.PUSH, socket_class=zmq.Socket) event_pipe.linger = 0 event_pipe.connect(self._event_interface) self._local.event_pipe = event_pipe @@ -161,9 +169,11 @@ async def _handle_event(self): Whenever *an* event arrives on the event stream, *all* waiting events are processed in order. """ + # create async wrapper within coroutine + pipe_in = zmq.asyncio.Socket(self._pipe_in0) try: while True: - await self._pipe_in0.recv() + await pipe_in.recv() # freeze event count so new writes don't extend the queue # while we are processing n_events = len(self._events) @@ -177,12 +187,12 @@ async def _handle_event(self): def _setup_pipe_in(self): """setup listening pipe for IOPub from forked subprocesses""" - ctx = self.socket.context + ctx = self._context # use UUID to authenticate pipe messages self._pipe_uuid = os.urandom(16) - self._pipe_in1 = ctx.socket(zmq.PULL) + self._pipe_in1 = ctx.socket(zmq.PULL, socket_class=zmq.Socket) self._pipe_in1.linger = 0 try: @@ -199,6 +209,8 @@ def _setup_pipe_in(self): async def _handle_pipe_msgs(self): """handle pipe messages from a subprocess""" + # create async wrapper within coroutine + self._async_pipe_in1 = zmq.asyncio.Socket(self._pipe_in1) try: while True: await self._handle_pipe_msg() @@ -209,8 +221,8 @@ async def _handle_pipe_msgs(self): async def _handle_pipe_msg(self, msg=None): """handle a pipe message from a subprocess""" - msg = msg or await self._pipe_in1.recv_multipart() - if not self._pipe_flag or not self._is_master_process(): + msg = msg or await self._async_pipe_in1.recv_multipart() + if not self._pipe_flag or not self._is_main_process(): return if msg[0] != self._pipe_uuid: print("Bad pipe message: %s", msg, file=sys.__stderr__) @@ -225,14 +237,14 @@ def _setup_pipe_out(self): pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port) return ctx, pipe_out - def _is_master_process(self): - return os.getpid() == self._master_pid + def _is_main_process(self): + return os.getpid() == self._main_pid def 
_check_mp_mode(self): """check for forks, and switch to zmq pipeline if necessary""" - if not self._pipe_flag or self._is_master_process(): - return MASTER - return CHILD + if not self._pipe_flag or self._is_main_process(): + return _PARENT + return _CHILD def start(self): """Start the IOPub thread""" @@ -265,7 +277,8 @@ def close(self): self._pipe_in0.close() if self._pipe_flag: self._pipe_in1.close() - self.socket.close() + if self.socket is not None: + self.socket.close() self.socket = None @property @@ -301,12 +314,12 @@ def _really_send(self, msg, *args, **kwargs): return mp_mode = self._check_mp_mode() - - if mp_mode != CHILD: - # we are master, do a regular send + if mp_mode != _CHILD: + # we are the main parent process, do a regular send + assert self.socket is not None self.socket.send_multipart(msg, *args, **kwargs) else: - # we are a child, pipe to master + # we are a child, pipe to parent process # new context/socket for every pipe-out # since forks don't teardown politely, use ctx.term to ensure send has completed ctx, pipe_out = self._setup_pipe_out() @@ -379,7 +392,7 @@ class OutStream(TextIOBase): flush_interval = 0.2 topic = None encoding = "UTF-8" - _exc: Optional[Any] = None + _exc: Any = None def fileno(self): """ @@ -477,7 +490,7 @@ def __init__( self._thread_to_parent = {} self._thread_to_parent_header = {} self._parent_header_global = {} - self._master_pid = os.getpid() + self._main_pid = os.getpid() self._flush_pending = False self._subprocess_flush_pending = False self._buffer_lock = threading.RLock() @@ -569,8 +582,8 @@ def _setup_stream_redirects(self, name): self.watch_fd_thread.daemon = True self.watch_fd_thread.start() - def _is_master_process(self): - return os.getpid() == self._master_pid + def _is_main_process(self): + return os.getpid() == self._main_pid def set_parent(self, parent): """Set the parent header.""" @@ -674,7 +687,7 @@ def _flush(self): ident=self.topic, ) - def write(self, string: str) -> Optional[int]: # 
type:ignore[override] + def write(self, string: str) -> int: """Write to current stream after encoding if necessary Returns @@ -700,7 +713,7 @@ def write(self, string: str) -> Optional[int]: # type:ignore[override] msg = "I/O operation on closed file" raise ValueError(msg) - is_child = not self._is_master_process() + is_child = not self._is_main_process() # only touch the buffer in the IO thread to avoid races with self._buffer_lock: self._buffers[frozenset(parent.items())].write(string) @@ -708,7 +721,7 @@ def write(self, string: str) -> Optional[int]: # type:ignore[override] # mp.Pool cannot be trusted to flush promptly (or ever), # and this helps. if self._subprocess_flush_pending: - return None + return 0 self._subprocess_flush_pending = True # We can not rely on self._io_loop.call_later from a subprocess self.pub_thread.schedule(self._flush) diff --git a/tests/test_io.py b/tests/test_io.py index e49bc2769..e3ff28159 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -15,7 +15,7 @@ import zmq.asyncio from jupyter_client.session import Session -from ipykernel.iostream import MASTER, BackgroundSocket, IOPubThread, OutStream +from ipykernel.iostream import _PARENT, BackgroundSocket, IOPubThread, OutStream @pytest.fixture() @@ -73,7 +73,7 @@ async def test_io_thread(anyio_backend, iopub_thread): ctx1, pipe = thread._setup_pipe_out() pipe.close() thread._pipe_in1.close() - thread._check_mp_mode = lambda: MASTER + thread._check_mp_mode = lambda: _PARENT thread._really_send([b"hi"]) ctx1.destroy() thread.stop() diff --git a/tests/test_kernel.py b/tests/test_kernel.py index 89d5e390b..8efc3dcc1 100644 --- a/tests/test_kernel.py +++ b/tests/test_kernel.py @@ -32,10 +32,10 @@ ) -def _check_master(kc, expected=True, stream="stdout"): +def _check_main(kc, expected=True, stream="stdout"): execute(kc=kc, code="import sys") flush_channels(kc) - msg_id, content = execute(kc=kc, code="print(sys.%s._is_master_process())" % stream) + msg_id, content = execute(kc=kc, 
code="print(sys.%s._is_main_process())" % stream) stdout, stderr = assemble_output(kc.get_iopub_msg) assert stdout.strip() == repr(expected) @@ -56,7 +56,7 @@ def test_simple_print(): stdout, stderr = assemble_output(kc.get_iopub_msg) assert stdout == "hi\n" assert stderr == "" - _check_master(kc, expected=True) + _check_main(kc, expected=True) def test_print_to_correct_cell_from_thread(): @@ -168,7 +168,7 @@ def test_capture_fd(): stdout, stderr = assemble_output(iopub) assert stdout == "capsys\n" assert stderr == "" - _check_master(kc, expected=True) + _check_main(kc, expected=True) @pytest.mark.skip(reason="Currently don't capture during test as pytest does its own capturing") @@ -182,7 +182,7 @@ def test_subprocess_peek_at_stream_fileno(): stdout, stderr = assemble_output(iopub) assert stdout == "CAP1\nCAP2\n" assert stderr == "" - _check_master(kc, expected=True) + _check_main(kc, expected=True) def test_sys_path(): @@ -218,7 +218,7 @@ def test_sys_path_profile_dir(): def test_subprocess_print(): """printing from forked mp.Process""" with new_kernel() as kc: - _check_master(kc, expected=True) + _check_main(kc, expected=True) flush_channels(kc) np = 5 code = "\n".join( @@ -238,8 +238,8 @@ def test_subprocess_print(): for n in range(np): assert stdout.count(str(n)) == 1, stdout assert stderr == "" - _check_master(kc, expected=True) - _check_master(kc, expected=True, stream="stderr") + _check_main(kc, expected=True) + _check_main(kc, expected=True, stream="stderr") @flaky(max_runs=3) @@ -261,8 +261,8 @@ def test_subprocess_noprint(): assert stdout == "" assert stderr == "" - _check_master(kc, expected=True) - _check_master(kc, expected=True, stream="stderr") + _check_main(kc, expected=True) + _check_main(kc, expected=True, stream="stderr") @flaky(max_runs=3) @@ -287,8 +287,8 @@ def test_subprocess_error(): assert stdout == "" assert "ValueError" in stderr - _check_master(kc, expected=True) - _check_master(kc, expected=True, stream="stderr") + _check_main(kc, 
expected=True) + _check_main(kc, expected=True, stream="stderr") # raw_input tests From b7ad5c8d1e00ffc48777c610a0573a57b462717d Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Mon, 28 Oct 2024 04:00:48 -0700 Subject: [PATCH 36/97] Add 20 min timeout dowstream ipyparallel (#1287) --- .github/workflows/downstream.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml index 68afcbf34..6d5d08ec5 100644 --- a/.github/workflows/downstream.yml +++ b/.github/workflows/downstream.yml @@ -56,6 +56,7 @@ jobs: ipyparallel: runs-on: ubuntu-latest + timeout-minutes: 20 steps: - name: Checkout uses: actions/checkout@v4 From c353ddfdaee02cd1ce7079f5ba3fb829deb48d81 Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Tue, 5 Nov 2024 06:41:11 +0000 Subject: [PATCH 37/97] Improve robustness of subshell concurrency tests using Barrier (#1288) --- tests/test_subshells.py | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/tests/test_subshells.py b/tests/test_subshells.py index d8d81aeae..141a3b7c4 100644 --- a/tests/test_subshells.py +++ b/tests/test_subshells.py @@ -134,19 +134,22 @@ def test_run_concurrently_sequence(are_subshells, overlap): for is_subshell in are_subshells ] - # Import time module before running time-sensitive subshell code. - execute_request_subshell_id(kc, "import time; print('ok')", None) + # Import time module before running time-sensitive subshell code + # and use threading.Barrier to synchronise start of subshell code. 
+ execute_request_subshell_id( + kc, "import threading as t, time; b=t.Barrier(2); print('ok')", None + ) sleep = 0.2 if overlap: codes = [ - f"start0=True; end0=False; time.sleep({sleep}); end0=True", - f"time.sleep({sleep/2}); assert start0; assert not end0; time.sleep({sleep}); assert end0", + f"b.wait(); start0=True; end0=False; time.sleep({sleep}); end0=True", + f"b.wait(); time.sleep({sleep/2}); assert start0; assert not end0; time.sleep({sleep}); assert end0", ] else: codes = [ - f"start0=True; end0=False; time.sleep({sleep}); assert end1", - f"time.sleep({sleep/2}); assert start0; assert not end0; end1=True", + f"b.wait(); start0=True; end0=False; time.sleep({sleep}); assert end1", + f"b.wait(); time.sleep({sleep/2}); assert start0; assert not end0; end1=True", ] msgs = [] @@ -174,8 +177,11 @@ def test_run_concurrently_timing(include_main_shell): create_subshell_helper(kc)["subshell_id"], ] - # Import time module before running time-sensitive subshell code. - execute_request_subshell_id(kc, "import time; print('ok')", None) + # Import time module before running time-sensitive subshell code + # and use threading.Barrier to synchronise start of subshell code. + execute_request_subshell_id( + kc, "import threading as t, time; b=t.Barrier(2); print('ok')", None + ) times = (0.2, 0.2) # Prepare messages, times are sleep times in seconds. @@ -183,7 +189,7 @@ def test_run_concurrently_timing(include_main_shell): # the execute_reply messages may overlap. msgs = [] for id, sleep in zip(subshell_ids, times): - code = f"time.sleep({sleep})" + code = f"b.wait(); time.sleep({sleep})" msg = kc.session.msg("execute_request", {"code": code}) msg["header"]["subshell_id"] = id msgs.append(msg) @@ -213,11 +219,17 @@ def test_execution_count(): with new_kernel() as kc: subshell_id = create_subshell_helper(kc)["subshell_id"] + # Import time module before running time-sensitive subshell code + # and use threading.Barrier to synchronise start of subshell code. 
+ execute_request_subshell_id( + kc, "import threading as t, time; b=t.Barrier(2); print('ok')", None + ) + # Prepare messages times = (0.2, 0.1, 0.4, 0.15) # Sleep seconds msgs = [] - for id, sleep in zip((None, subshell_id, None, subshell_id), times): - code = f"import time; time.sleep({sleep})" + for i, (id, sleep) in enumerate(zip((None, subshell_id, None, subshell_id), times)): + code = f"b.wait(); time.sleep({sleep})" if i < 2 else f"time.sleep({sleep})" msg = kc.session.msg("execute_request", {"code": code}) msg["header"]["subshell_id"] = id msgs.append(msg) From 8c8d7d2b8bde64da5e22c89e58ee481e66986c08 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Fri, 8 Nov 2024 03:13:07 -0800 Subject: [PATCH 38/97] Rely on intrsphinx_registry to keep intersphinx up to date. (#1290) --- docs/conf.py | 10 +++++----- pyproject.toml | 1 + 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/conf.py b/docs/conf.py index 38a724b52..5a5f8ed5c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -15,6 +15,8 @@ from pathlib import Path from typing import Any +from intersphinx_registry import get_intersphinx_mapping + # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. @@ -303,11 +305,9 @@ # Example configuration for intersphinx: refer to the Python standard library. 
-intersphinx_mapping = { - "python": ("https://docs.python.org/3/", None), - "ipython": ("https://ipython.readthedocs.io/en/latest", None), - "jupyter": ("https://jupyter.readthedocs.io/en/latest", None), -} + + +intersphinx_mapping = get_intersphinx_mapping(packages={"ipython", "python", "jupyter"}) def setup(app): diff --git a/pyproject.toml b/pyproject.toml index e1d7b1d5b..675d9d875 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,6 +51,7 @@ docs = [ "sphinxcontrib_github_alt", "sphinxcontrib-spelling", "sphinx-autodoc-typehints", + "intersphinx_registry", "trio" ] test = [ From 8c4901d691b1f309da3b80eefad5af13d7418185 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 14 Nov 2024 23:30:37 -0800 Subject: [PATCH 39/97] Misc type annotations (#1294) --- ipykernel/kernelapp.py | 17 ++++++++++------- pyproject.toml | 2 ++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 55efaa8e4..66b750b2b 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -16,6 +16,7 @@ from io import FileIO, TextIOWrapper from logging import StreamHandler from pathlib import Path +from typing import Optional import zmq import zmq.asyncio @@ -54,6 +55,7 @@ from .ipkernel import IPythonKernel from .parentpoller import ParentPollerUnix, ParentPollerWindows from .shellchannel import ShellChannelThread +from .thread import BaseThread from .zmqshell import ZMQInteractiveShell # ----------------------------------------------------------------------------- @@ -142,9 +144,10 @@ class IPKernelApp(BaseIPythonApplication, InteractiveShellApp, ConnectionFileMix debug_shell_socket = Any() stdin_socket = Any() iopub_socket = Any() - iopub_thread = Any() - control_thread = Any() - shell_channel_thread = Any() + + iopub_thread: Optional[IOPubThread] = Instance(IOPubThread, allow_none=True) # type:ignore[assignment] + control_thread: Optional[BaseThread] = Instance(BaseThread, allow_none=True) # type:ignore[assignment] 
+ shell_channel_thread: Optional[BaseThread] = Instance(BaseThread, allow_none=True) # type:ignore[assignment] _ports = Dict() @@ -261,7 +264,7 @@ def _bind_socket(self, s, port): raise return None - def write_connection_file(self): + def write_connection_file(self, **kwargs: t.Any) -> None: """write connection info to JSON file""" cf = self.abs_connection_file connection_info = dict( @@ -401,15 +404,15 @@ def close(self): if self.heartbeat: self.log.debug("Closing heartbeat channel") self.heartbeat.context.term() - if self.iopub_thread: + if self.iopub_thread is not None: self.log.debug("Closing iopub channel") self.iopub_thread.stop() self.iopub_thread.close() - if self.control_thread and self.control_thread.is_alive(): + if self.control_thread is not None and self.control_thread.is_alive(): self.log.debug("Closing control thread") self.control_thread.stop() self.control_thread.join() - if self.shell_channel_thread and self.shell_channel_thread.is_alive(): + if self.shell_channel_thread is not None and self.shell_channel_thread.is_alive(): self.log.debug("Closing shell channel thread") self.shell_channel_thread.stop() self.shell_channel_thread.join() diff --git a/pyproject.toml b/pyproject.toml index 675d9d875..2360b6684 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -276,6 +276,8 @@ ignore = [ "G002", # `open()` should be replaced by `Path.open()` "PTH123", + # use `X | Y` for type annotations, this does not works for dynamic getting type hints on older python + "UP007", ] unfixable = [ # Don't touch print statements From 188f39c509f1056c3784aad6954498f31de955a3 Mon Sep 17 00:00:00 2001 From: David Brochart Date: Thu, 21 Nov 2024 13:16:35 +0100 Subject: [PATCH 40/97] Remove base setup (#1299) --- .github/workflows/ci.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 70b3bf405..98deaf12d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -36,8 +36,14 
@@ jobs: - name: Checkout uses: actions/checkout@v4 - - name: Base Setup - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 + - uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + + - name: Install hatch + run: | + python --version + python -m pip install hatch - name: Run the tests timeout-minutes: 15 From b11d42c1f85e30dc87b73b71e79f994106ab82d4 Mon Sep 17 00:00:00 2001 From: David Brochart Date: Tue, 17 Dec 2024 09:23:45 +0100 Subject: [PATCH 41/97] Fix ipykernel install (#1310) --- ipykernel/kernelapp.py | 1 + 1 file changed, 1 insertion(+) diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 66b750b2b..8bb047339 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -734,6 +734,7 @@ def start(self) -> None: """Start the application.""" if self.subapp is not None: self.subapp.start() + return if self.poller is not None: self.poller.start() backend = "trio" if self.trio_loop else "asyncio" From 6f2bc841641541dee8cfc78c4b6b0b199f90cf53 Mon Sep 17 00:00:00 2001 From: David Brochart Date: Fri, 20 Dec 2024 20:16:00 +0100 Subject: [PATCH 42/97] Fix test_print_to_correct_cell_from_child_thread (#1312) --- tests/test_kernel.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_kernel.py b/tests/test_kernel.py index 8efc3dcc1..2b379eb83 100644 --- a/tests/test_kernel.py +++ b/tests/test_kernel.py @@ -106,7 +106,9 @@ def child_target(): def parent_target(): sleep({interval}) - Thread(target=child_target).start() + thread = Thread(target=child_target) + thread.start() + thread.join() Thread(target=parent_target).start() """ From f3f2a6031605323fb361ccc7ba11181af7415eee Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Sun, 2 Feb 2025 23:51:22 -0800 Subject: [PATCH 43/97] More Informative assert (#1314) --- tests/test_subshells.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/tests/test_subshells.py b/tests/test_subshells.py index 
141a3b7c4..419a6bd3b 100644 --- a/tests/test_subshells.py +++ b/tests/test_subshells.py @@ -125,6 +125,9 @@ def test_thread_ids(): delete_subshell_helper(kc, subshell_id) +@pytest.mark.xfail( + strict=False, reason="this randomly fail and make downstream testing less useful" +) @pytest.mark.parametrize("are_subshells", [(False, True), (True, False), (True, True)]) @pytest.mark.parametrize("overlap", [True, False]) def test_run_concurrently_sequence(are_subshells, overlap): @@ -166,7 +169,7 @@ def test_run_concurrently_sequence(are_subshells, overlap): delete_subshell_helper(kc, subshell_id) for reply in replies: - assert reply["content"]["status"] == "ok" + assert reply["content"]["status"] == "ok", reply @pytest.mark.parametrize("include_main_shell", [True, False]) From e64fb2e98b16b7c8b3b1a4eee0a83d4d2c467c47 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Wed, 12 Feb 2025 05:48:01 -0800 Subject: [PATCH 44/97] Remove link to numfocus for funding. (#1320) --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2360b6684..4de39428b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,6 @@ dependencies = [ [project.urls] Homepage = "https://ipython.org" Documentation = "https://ipykernel.readthedocs.io" -Funding = "https://numfocus.org/donate" Source = "https://github.com/ipython/ipykernel" Tracker = "https://github.com/ipython/ipykernel/issues" From 4cc832edf26e1525e86d0c9a0e84d6216581781d Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 13 Feb 2025 10:05:14 +0100 Subject: [PATCH 45/97] Fix types lints (#1321) --- ipykernel/subshell_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ipykernel/subshell_manager.py b/ipykernel/subshell_manager.py index 805d6f812..ca037e777 100644 --- a/ipykernel/subshell_manager.py +++ b/ipykernel/subshell_manager.py @@ -124,9 +124,9 @@ async def listen_from_control(self, subshell_task: t.Any) -> None: socket = 
self._control_shell_channel_socket while True: - request = await socket.recv_json() # type: ignore[misc] + request = await socket.recv_json() reply = await self._process_control_request(request, subshell_task) - await socket.send_json(reply) # type: ignore[func-returns-value] + await socket.send_json(reply) async def listen_from_subshells(self) -> None: """Listen for reply messages on inproc sockets of all subshells and resend From 9576a03aef1a2f9b245dbaf1233023e225bec09c Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 13 Feb 2025 11:03:25 +0100 Subject: [PATCH 46/97] Manually update mdformat pre-commit and run it. (#1327) --- .pre-commit-config.yaml | 2 +- CHANGELOG.md | 56 ++++++++++++++++++++--------------------- 2 files changed, 29 insertions(+), 29 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cc2cfd9d8..0212b84c5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -27,7 +27,7 @@ repos: - id: check-github-workflows - repo: https://github.com/executablebooks/mdformat - rev: 0.7.17 + rev: 0.7.22 hooks: - id: mdformat additional_dependencies: diff --git a/CHANGELOG.md b/CHANGELOG.md index 09342c320..d314cf253 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,7 +59,7 @@ ### Maintenance and upkeep improvements -- \[6.x\] Update Release Scripts [#1251](https://github.com/ipython/ipykernel/pull/1251) ([@blink1073](https://github.com/blink1073)) +- [6.x] Update Release Scripts [#1251](https://github.com/ipython/ipykernel/pull/1251) ([@blink1073](https://github.com/blink1073)) ### Contributors to this release @@ -118,7 +118,7 @@ ### Bugs fixed -- Fix: ipykernel_launcher, delete absolute sys.path\[0\] [#1206](https://github.com/ipython/ipykernel/pull/1206) ([@stdll00](https://github.com/stdll00)) +- Fix: ipykernel_launcher, delete absolute sys.path[0] [#1206](https://github.com/ipython/ipykernel/pull/1206) ([@stdll00](https://github.com/stdll00)) ### Maintenance and upkeep improvements @@ -386,7 +386,7 @@ 
### Enhancements made -- Support control\<>iopub messages to e.g. unblock comm_msg from command execution [#1114](https://github.com/ipython/ipykernel/pull/1114) ([@tkrabel-db](https://github.com/tkrabel-db)) +- Support control\<>iopub messages to e.g. unblock comm_msg from command execution [#1114](https://github.com/ipython/ipykernel/pull/1114) ([@tkrabel-db](https://github.com/tkrabel-db)) - Add outstream hook similar to display publisher [#1110](https://github.com/ipython/ipykernel/pull/1110) ([@maartenbreddels](https://github.com/maartenbreddels)) ### Maintenance and upkeep improvements @@ -796,10 +796,10 @@ ### Maintenance and upkeep improvements -- \[pre-commit.ci\] pre-commit autoupdate [#989](https://github.com/ipython/ipykernel/pull/989) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#985](https://github.com/ipython/ipykernel/pull/985) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#989](https://github.com/ipython/ipykernel/pull/989) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#985](https://github.com/ipython/ipykernel/pull/985) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Add python logo in svg format [#984](https://github.com/ipython/ipykernel/pull/984) ([@steff456](https://github.com/steff456)) -- \[pre-commit.ci\] pre-commit autoupdate [#982](https://github.com/ipython/ipykernel/pull/982) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#982](https://github.com/ipython/ipykernel/pull/982) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release @@ -817,13 +817,13 @@ ### Maintenance and upkeep improvements -- \[pre-commit.ci\] pre-commit autoupdate [#978](https://github.com/ipython/ipykernel/pull/978) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate 
[#977](https://github.com/ipython/ipykernel/pull/977) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#976](https://github.com/ipython/ipykernel/pull/976) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#974](https://github.com/ipython/ipykernel/pull/974) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#971](https://github.com/ipython/ipykernel/pull/971) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#968](https://github.com/ipython/ipykernel/pull/968) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#966](https://github.com/ipython/ipykernel/pull/966) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#978](https://github.com/ipython/ipykernel/pull/978) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#977](https://github.com/ipython/ipykernel/pull/977) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#976](https://github.com/ipython/ipykernel/pull/976) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#974](https://github.com/ipython/ipykernel/pull/974) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#971](https://github.com/ipython/ipykernel/pull/971) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#968](https://github.com/ipython/ipykernel/pull/968) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#966](https://github.com/ipython/ipykernel/pull/966) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release @@ -841,9 +841,9 @@ ### Maintenance and upkeep improvements -- 
\[pre-commit.ci\] pre-commit autoupdate [#962](https://github.com/ipython/ipykernel/pull/962) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#961](https://github.com/ipython/ipykernel/pull/961) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#960](https://github.com/ipython/ipykernel/pull/960) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#962](https://github.com/ipython/ipykernel/pull/962) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#961](https://github.com/ipython/ipykernel/pull/961) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#960](https://github.com/ipython/ipykernel/pull/960) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release @@ -863,7 +863,7 @@ - Back to top-level tornado IOLoop [#958](https://github.com/ipython/ipykernel/pull/958) ([@minrk](https://github.com/minrk)) - Explicitly require pyzmq >= 17 [#957](https://github.com/ipython/ipykernel/pull/957) ([@minrk](https://github.com/minrk)) -- \[pre-commit.ci\] pre-commit autoupdate [#954](https://github.com/ipython/ipykernel/pull/954) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#954](https://github.com/ipython/ipykernel/pull/954) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release @@ -887,7 +887,7 @@ ### Maintenance and upkeep improvements - Fix sphinx 5.0 support [#951](https://github.com/ipython/ipykernel/pull/951) ([@blink1073](https://github.com/blink1073)) -- \[pre-commit.ci\] pre-commit autoupdate [#950](https://github.com/ipython/ipykernel/pull/950) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#950](https://github.com/ipython/ipykernel/pull/950) 
([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release @@ -906,18 +906,18 @@ ### Maintenance and upkeep improvements -- \[pre-commit.ci\] pre-commit autoupdate [#945](https://github.com/ipython/ipykernel/pull/945) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#945](https://github.com/ipython/ipykernel/pull/945) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Clean up typings [#939](https://github.com/ipython/ipykernel/pull/939) ([@blink1073](https://github.com/blink1073)) -- \[pre-commit.ci\] pre-commit autoupdate [#938](https://github.com/ipython/ipykernel/pull/938) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#938](https://github.com/ipython/ipykernel/pull/938) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Clean up types [#933](https://github.com/ipython/ipykernel/pull/933) ([@blink1073](https://github.com/blink1073)) -- \[pre-commit.ci\] pre-commit autoupdate [#932](https://github.com/ipython/ipykernel/pull/932) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#932](https://github.com/ipython/ipykernel/pull/932) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - Switch to hatch backend [#931](https://github.com/ipython/ipykernel/pull/931) ([@blink1073](https://github.com/blink1073)) -- \[pre-commit.ci\] pre-commit autoupdate [#928](https://github.com/ipython/ipykernel/pull/928) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#926](https://github.com/ipython/ipykernel/pull/926) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#928](https://github.com/ipython/ipykernel/pull/928) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#926](https://github.com/ipython/ipykernel/pull/926) ([@pre-commit-ci](https://github.com/pre-commit-ci)) - 
Allow enforce PR label workflow to add labels [#921](https://github.com/ipython/ipykernel/pull/921) ([@blink1073](https://github.com/blink1073)) -- \[pre-commit.ci\] pre-commit autoupdate [#920](https://github.com/ipython/ipykernel/pull/920) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#919](https://github.com/ipython/ipykernel/pull/919) ([@pre-commit-ci](https://github.com/pre-commit-ci)) -- \[pre-commit.ci\] pre-commit autoupdate [#917](https://github.com/ipython/ipykernel/pull/917) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#920](https://github.com/ipython/ipykernel/pull/920) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#919](https://github.com/ipython/ipykernel/pull/919) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#917](https://github.com/ipython/ipykernel/pull/917) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release @@ -942,7 +942,7 @@ - Add basic mypy support [#913](https://github.com/ipython/ipykernel/pull/913) ([@blink1073](https://github.com/blink1073)) - Clean up pre-commit [#911](https://github.com/ipython/ipykernel/pull/911) ([@blink1073](https://github.com/blink1073)) - Update setup.py [#909](https://github.com/ipython/ipykernel/pull/909) ([@tlinhart](https://github.com/tlinhart)) -- \[pre-commit.ci\] pre-commit autoupdate [#906](https://github.com/ipython/ipykernel/pull/906) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- [pre-commit.ci] pre-commit autoupdate [#906](https://github.com/ipython/ipykernel/pull/906) ([@pre-commit-ci](https://github.com/pre-commit-ci)) ### Contributors to this release @@ -1360,7 +1360,7 @@ - Add watchfd keyword to InProcessKernel OutStream initialization [#727](https://github.com/ipython/ipykernel/pull/727) ([@rayosborn](https://github.com/rayosborn)) - Fix typo in eventloops.py 
[#711](https://github.com/ipython/ipykernel/pull/711) ([@selasley](https://github.com/selasley)) -- \[bugfix\] fix in setup.py (comma before appnope) [#709](https://github.com/ipython/ipykernel/pull/709) ([@jstriebel](https://github.com/jstriebel)) +- [bugfix] fix in setup.py (comma before appnope) [#709](https://github.com/ipython/ipykernel/pull/709) ([@jstriebel](https://github.com/jstriebel)) ### Maintenance and upkeep improvements From 042fa04ac0dfefe06ef51fd24eaefc81fc1600de Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 13 Feb 2025 11:04:59 +0100 Subject: [PATCH 47/97] Manually update Codespell and fix new errors. (#1328) --- .pre-commit-config.yaml | 2 +- ipykernel/trio_runner.py | 2 +- tests/utils.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0212b84c5..dc1008333 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -61,7 +61,7 @@ repos: additional_dependencies: [black==23.7.0] - repo: https://github.com/codespell-project/codespell - rev: "v2.2.6" + rev: "v2.4.1" hooks: - id: codespell args: ["-L", "sur,nd"] diff --git a/ipykernel/trio_runner.py b/ipykernel/trio_runner.py index 45f738acb..a641feba8 100644 --- a/ipykernel/trio_runner.py +++ b/ipykernel/trio_runner.py @@ -29,7 +29,7 @@ def initialize(self, kernel, io_loop): bg_thread.start() def interrupt(self, signum, frame): - """Interuppt the runner.""" + """Interrupt the runner.""" if self._cell_cancel_scope: self._cell_cancel_scope.cancel() else: diff --git a/tests/utils.py b/tests/utils.py index b20e8fcb2..5bf98a051 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -166,7 +166,7 @@ def stop_global_kernel(): def new_kernel(argv=None): """Context manager for a new kernel in a subprocess - Should only be used for tests where the kernel must not be re-used. + Should only be used for tests where the kernel must not be reused. 
Returns ------- From a2f3d4b81135384f7389ff76e0848d95fdee657c Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 13 Feb 2025 11:28:04 +0100 Subject: [PATCH 48/97] Remove dead code (#1332) --- tests/_asyncio_utils.py | 17 ----------------- tests/test_eventloop.py | 6 ------ 2 files changed, 23 deletions(-) delete mode 100644 tests/_asyncio_utils.py diff --git a/tests/_asyncio_utils.py b/tests/_asyncio_utils.py deleted file mode 100644 index 43e3229fa..000000000 --- a/tests/_asyncio_utils.py +++ /dev/null @@ -1,17 +0,0 @@ -"""test utilities that use async/await syntax - -a separate file to avoid syntax errors on Python 2 -""" - -import asyncio - - -def async_func(): - """Simple async function to schedule a task on the current eventloop""" - loop = asyncio.get_event_loop() - assert loop.is_running() - - async def task(): - await asyncio.sleep(1) - - loop.create_task(task()) diff --git a/tests/test_eventloop.py b/tests/test_eventloop.py index 62a7f8ba3..57a2ca913 100644 --- a/tests/test_eventloop.py +++ b/tests/test_eventloop.py @@ -54,12 +54,6 @@ def _setup_env(): KM.shutdown_kernel(now=True) -async_code = """ -from tests._asyncio_utils import async_func -async_func() -""" - - windows_skip = pytest.mark.skipif(os.name == "nt", reason="causing failures on windows") From 3d4df3b6c18c16c6efcc1feb226e6c26f7bf1b28 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 13 Feb 2025 11:32:03 +0100 Subject: [PATCH 49/97] minor code reformating valid ruff 0.9.6 (#1330) --- ipykernel/connect.py | 4 ++-- ipykernel/iostream.py | 12 +++++++----- tests/test_message_spec.py | 1 - tests/test_subshells.py | 4 ++-- 4 files changed, 11 insertions(+), 10 deletions(-) diff --git a/ipykernel/connect.py b/ipykernel/connect.py index 59d36452d..117484599 100644 --- a/ipykernel/connect.py +++ b/ipykernel/connect.py @@ -133,8 +133,8 @@ def connect_qtconsole( __all__ = [ - "write_connection_file", + "connect_qtconsole", "get_connection_file", "get_connection_info", - "connect_qtconsole", + 
"write_connection_file", ] diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index d81710175..65495efb5 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -501,11 +501,13 @@ def __init__( self._local = local() if ( - watchfd - and ( - (sys.platform.startswith("linux") or sys.platform.startswith("darwin")) - # Pytest set its own capture. Don't redirect from within pytest. - and ("PYTEST_CURRENT_TEST" not in os.environ) + ( + watchfd + and ( + (sys.platform.startswith("linux") or sys.platform.startswith("darwin")) + # Pytest set its own capture. Don't redirect from within pytest. + and ("PYTEST_CURRENT_TEST" not in os.environ) + ) ) # allow forcing watchfd (mainly for tests) or watchfd == "force" diff --git a/tests/test_message_spec.py b/tests/test_message_spec.py index 694de44b1..9dd58d2f2 100644 --- a/tests/test_message_spec.py +++ b/tests/test_message_spec.py @@ -34,7 +34,6 @@ def _setup_env(): class Reference(HasTraits): - """ Base class for message spec specification testing. 
diff --git a/tests/test_subshells.py b/tests/test_subshells.py index 419a6bd3b..0b1c1f1aa 100644 --- a/tests/test_subshells.py +++ b/tests/test_subshells.py @@ -147,12 +147,12 @@ def test_run_concurrently_sequence(are_subshells, overlap): if overlap: codes = [ f"b.wait(); start0=True; end0=False; time.sleep({sleep}); end0=True", - f"b.wait(); time.sleep({sleep/2}); assert start0; assert not end0; time.sleep({sleep}); assert end0", + f"b.wait(); time.sleep({sleep / 2}); assert start0; assert not end0; time.sleep({sleep}); assert end0", ] else: codes = [ f"b.wait(); start0=True; end0=False; time.sleep({sleep}); assert end1", - f"b.wait(); time.sleep({sleep/2}); assert start0; assert not end0; end1=True", + f"b.wait(); time.sleep({sleep / 2}); assert start0; assert not end0; end1=True", ] msgs = [] From 180dd4f32c98d8a2a3a84e5c5cbbe4161d4b8166 Mon Sep 17 00:00:00 2001 From: limwz01 <117669574+limwz01@users.noreply.github.com> Date: Thu, 13 Feb 2025 18:35:20 +0800 Subject: [PATCH 50/97] properly close OutStream and various fixes (#1305) --- ipykernel/_version.py | 2 + ipykernel/embed.py | 1 + ipykernel/inprocess/channels.py | 1 + ipykernel/iostream.py | 14 +++--- ipykernel/kernelapp.py | 76 +++++++++++++++++++++++++-------- ipykernel/pickleutil.py | 2 + ipykernel/thread.py | 2 + tests/test_kernelapp.py | 1 + 8 files changed, 74 insertions(+), 25 deletions(-) diff --git a/ipykernel/_version.py b/ipykernel/_version.py index 5907d150c..b4c5b1dab 100644 --- a/ipykernel/_version.py +++ b/ipykernel/_version.py @@ -1,6 +1,8 @@ """ store the current version info of the server. 
""" +from __future__ import annotations + import re # Version string must appear intact for hatch versioning diff --git a/ipykernel/embed.py b/ipykernel/embed.py index 3e4abd390..ad22e2a1e 100644 --- a/ipykernel/embed.py +++ b/ipykernel/embed.py @@ -55,3 +55,4 @@ def embed_kernel(module=None, local_ns=None, **kwargs): app.kernel.user_ns = local_ns app.shell.set_completer_frame() # type:ignore[union-attr] app.start() + app.close() diff --git a/ipykernel/inprocess/channels.py b/ipykernel/inprocess/channels.py index 4c01c5bcb..a886f6c81 100644 --- a/ipykernel/inprocess/channels.py +++ b/ipykernel/inprocess/channels.py @@ -2,6 +2,7 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. +from __future__ import annotations from jupyter_client.channelsabc import HBChannelABC diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index 65495efb5..dfaee56f3 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -398,8 +398,8 @@ def fileno(self): """ Things like subprocess will peak and write to the fileno() of stderr/stdout. 
""" - if getattr(self, "_original_stdstream_copy", None) is not None: - return self._original_stdstream_copy + if getattr(self, "_original_stdstream_fd", None) is not None: + return self._original_stdstream_fd msg = "fileno" raise io.UnsupportedOperation(msg) @@ -529,10 +529,7 @@ def __init__( # echo on the _copy_ we made during # this is the actual terminal FD now echo = io.TextIOWrapper( - io.FileIO( - self._original_stdstream_copy, - "w", - ) + io.FileIO(self._original_stdstream_copy, "w", closefd=False) ) self.echo = echo else: @@ -597,9 +594,10 @@ def close(self): self._should_watch = False # thread won't wake unless there's something to read # writing something after _should_watch will not be echoed - os.write(self._original_stdstream_fd, b"\0") - if self.watch_fd_thread is not None: + if self.watch_fd_thread is not None and self.watch_fd_thread.is_alive(): + os.write(self._original_stdstream_fd, b"\0") self.watch_fd_thread.join() + self.echo = None # restore original FDs os.dup2(self._original_stdstream_copy, self._original_stdstream_fd) os.close(self._original_stdstream_copy) diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 8bb047339..bb847e65e 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -151,6 +151,11 @@ class IPKernelApp(BaseIPythonApplication, InteractiveShellApp, ConnectionFileMix _ports = Dict() + _original_io = Any() + _log_map = Any() + _io_modified = Bool(False) + _blackhole = Any() + subcommands = { "install": ( "ipykernel.kernelspec.InstallIPythonKernelSpecApp", @@ -470,41 +475,53 @@ def log_connection_info(self): def init_blackhole(self): """redirects stdout/stderr to devnull if necessary""" + self._save_io() if self.no_stdout or self.no_stderr: - blackhole = open(os.devnull, "w") # noqa: SIM115 + # keep reference around so that it would not accidentally close the pipe fds + self._blackhole = open(os.devnull, "w") # noqa: SIM115 if self.no_stdout: - sys.stdout = sys.__stdout__ = blackhole # 
type:ignore[misc] + if sys.stdout is not None: + sys.stdout.flush() + sys.stdout = self._blackhole if self.no_stderr: - sys.stderr = sys.__stderr__ = blackhole # type:ignore[misc] + if sys.stderr is not None: + sys.stderr.flush() + sys.stderr = self._blackhole def init_io(self): """Redirect input streams and set a display hook.""" + self._save_io() if self.outstream_class: outstream_factory = import_item(str(self.outstream_class)) - if sys.stdout is not None: - sys.stdout.flush() - e_stdout = None if self.quiet else sys.__stdout__ - e_stderr = None if self.quiet else sys.__stderr__ + e_stdout = None if self.quiet else sys.stdout + e_stderr = None if self.quiet else sys.stderr if not self.capture_fd_output: outstream_factory = partial(outstream_factory, watchfd=False) + if sys.stdout is not None: + sys.stdout.flush() sys.stdout = outstream_factory(self.session, self.iopub_thread, "stdout", echo=e_stdout) + if sys.stderr is not None: sys.stderr.flush() sys.stderr = outstream_factory(self.session, self.iopub_thread, "stderr", echo=e_stderr) + if hasattr(sys.stderr, "_original_stdstream_copy"): for handler in self.log.handlers: - if isinstance(handler, StreamHandler) and (handler.stream.buffer.fileno() == 2): + if ( + isinstance(handler, StreamHandler) + and (buffer := getattr(handler.stream, "buffer", None)) + and (fileno := getattr(buffer, "fileno", None)) + and fileno() == sys.stderr._original_stdstream_fd # type:ignore[attr-defined] + ): self.log.debug("Seeing logger to stderr, rerouting to raw filedescriptor.") - - handler.stream = TextIOWrapper( - FileIO( - sys.stderr._original_stdstream_copy, - "w", - ) + io_wrapper = TextIOWrapper( + FileIO(sys.stderr._original_stdstream_copy, "w", closefd=False) ) + self._log_map[id(io_wrapper)] = handler.stream + handler.stream = io_wrapper if self.displayhook_class: displayhook_factory = import_item(str(self.displayhook_class)) self.displayhook = displayhook_factory(self.session, self.iopub_socket) @@ -512,14 +529,39 @@ def 
init_io(self): self.patch_io() + def _save_io(self): + if not self._io_modified: + self._original_io = sys.stdout, sys.stderr, sys.displayhook + self._log_map = {} + self._io_modified = True + def reset_io(self): """restore original io restores state after init_io """ - sys.stdout = sys.__stdout__ - sys.stderr = sys.__stderr__ - sys.displayhook = sys.__displayhook__ + if not self._io_modified: + return + stdout, stderr, displayhook = sys.stdout, sys.stderr, sys.displayhook + sys.stdout, sys.stderr, sys.displayhook = self._original_io + self._original_io = None + self._io_modified = False + if finish_displayhook := getattr(displayhook, "finish_displayhook", None): + finish_displayhook() + if hasattr(stderr, "_original_stdstream_copy"): + for handler in self.log.handlers: + if orig_stream := self._log_map.get(id(handler.stream)): + self.log.debug("Seeing modified logger, rerouting back to stderr") + handler.stream = orig_stream + self._log_map = None + if self.outstream_class: + outstream_factory = import_item(str(self.outstream_class)) + if isinstance(stderr, outstream_factory): + stderr.close() + if isinstance(stdout, outstream_factory): + stdout.close() + if self._blackhole: + self._blackhole.close() def patch_io(self): """Patch important libraries that can't handle sys.stdout forwarding""" diff --git a/ipykernel/pickleutil.py b/ipykernel/pickleutil.py index 4ffa5262e..15fc0e675 100644 --- a/ipykernel/pickleutil.py +++ b/ipykernel/pickleutil.py @@ -2,6 +2,8 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
+from __future__ import annotations + import copy import pickle import sys diff --git a/ipykernel/thread.py b/ipykernel/thread.py index 40509eced..b3b33d320 100644 --- a/ipykernel/thread.py +++ b/ipykernel/thread.py @@ -1,4 +1,6 @@ """Base class for threads.""" +from __future__ import annotations + import typing as t from threading import Event, Thread diff --git a/tests/test_kernelapp.py b/tests/test_kernelapp.py index 05f6e5579..ec91687f4 100644 --- a/tests/test_kernelapp.py +++ b/tests/test_kernelapp.py @@ -31,6 +31,7 @@ def test_blackhole(): app.no_stderr = True app.no_stdout = True app.init_blackhole() + app.close() def test_start_app(): From 69d69b5c7b775ba4c01218932507f3b6fe9c7a85 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 13 Feb 2025 12:26:51 +0100 Subject: [PATCH 51/97] Manually update pre-commit hooks that do not trigger new errors/fixes. (#1326) --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dc1008333..1d6e86a19 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.5.0 + rev: v5.0.0 hooks: - id: check-case-conflict - id: check-ast @@ -22,7 +22,7 @@ repos: - id: trailing-whitespace - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.27.4 + rev: 0.31.1 hooks: - id: check-github-workflows @@ -55,7 +55,7 @@ repos: ] - repo: https://github.com/adamchainz/blacken-docs - rev: "1.16.0" + rev: "1.19.1" hooks: - id: blacken-docs additional_dependencies: [black==23.7.0] From f8e7b8e952bea99b68300e9b2913fa366d3d814e Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 13 Feb 2025 12:32:39 +0100 Subject: [PATCH 52/97] Copy payloadpage.page from IPython (#1317) --- ipykernel/zmqshell.py | 32 ++++++++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/ipykernel/zmqshell.py 
b/ipykernel/zmqshell.py index 3f97e8170..268d826e2 100644 --- a/ipykernel/zmqshell.py +++ b/ipykernel/zmqshell.py @@ -20,7 +20,7 @@ import warnings from pathlib import Path -from IPython.core import page, payloadpage +from IPython.core import page from IPython.core.autocall import ZMQExitAutocall from IPython.core.displaypub import DisplayPublisher from IPython.core.error import UsageError @@ -541,10 +541,38 @@ def init_environment(self): env["PAGER"] = "cat" env["GIT_PAGER"] = "cat" + def payloadpage_page(self, strg, start=0, screen_lines=0, pager_cmd=None): + """Print a string, piping through a pager. + + This version ignores the screen_lines and pager_cmd arguments and uses + IPython's payload system instead. + + Parameters + ---------- + strg : str or mime-dict + Text to page, or a mime-type keyed dict of already formatted data. + start : int + Starting line at which to place the display. + """ + + # Some routines may auto-compute start offsets incorrectly and pass a + # negative value. Offset to 0 for robustness. + start = max(0, start) + + data = strg if isinstance(strg, dict) else {"text/plain": strg} + + payload = dict( + source="page", + data=data, + start=start, + ) + assert self.payload_manager is not None + self.payload_manager.write_payload(payload) + def init_hooks(self): """Initialize hooks.""" super().init_hooks() - self.set_hook("show_in_pager", page.as_hook(payloadpage.page), 99) + self.set_hook("show_in_pager", page.as_hook(self.payloadpage_page), 99) def init_data_pub(self): """Delay datapub init until request, for deprecation warnings""" From 4f42f033bbcd6b0313d1d9f521582bb75b7da799 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 13 Feb 2025 13:42:32 +0100 Subject: [PATCH 53/97] Try to force precommit-ci to send autoupdate PRs. 
(#1325) Co-authored-by: Min RK --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1d6e86a19..58aa38cea 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ ci: - autoupdate_schedule: monthly autoupdate_commit_msg: "chore: update pre-commit hooks" + autoupdate_schedule: weekly repos: - repo: https://github.com/pre-commit/pre-commit-hooks From 3867c337e6a601e070d416cf0cdc0507a926c3ad Mon Sep 17 00:00:00 2001 From: Stephen Macke Date: Thu, 13 Feb 2025 23:23:09 -0800 Subject: [PATCH 54/97] make debugger class configurable (#1307) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ipykernel/ipkernel.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/ipykernel/ipkernel.py b/ipykernel/ipkernel.py index 48efa6cd6..5c4501698 100644 --- a/ipykernel/ipkernel.py +++ b/ipykernel/ipkernel.py @@ -71,6 +71,12 @@ class IPythonKernel(KernelBase): shell = Instance("IPython.core.interactiveshell.InteractiveShellABC", allow_none=True) shell_class = Type(ZMQInteractiveShell) + # use fully-qualified name to ensure lazy import and prevent the issue from + # https://github.com/ipython/ipykernel/issues/1198 + debugger_class = Type("ipykernel.debugger.Debugger") + + compiler_class = Type(XCachingCompiler) + use_experimental_completions = Bool( True, help="Set this flag to False to deactivate the use of experimental IPython completion APIs.", @@ -110,11 +116,11 @@ def __init__(self, **kwargs): self.executing_blocking_code_in_main_shell = False - from .debugger import Debugger, _is_debugpy_available + from .debugger import _is_debugpy_available # Initialize the Debugger if _is_debugpy_available: - self.debugger = Debugger( + self.debugger = self.debugger_class( self.log, self.debugpy_socket, self._publish_debug_event, @@ -130,7 +136,7 @@ def __init__(self, **kwargs): 
user_module=self.user_module, user_ns=self.user_ns, kernel=self, - compiler_class=XCachingCompiler, + compiler_class=self.compiler_class, ) self.shell.displayhook.session = self.session # type:ignore[attr-defined] From 521549a46040f8aaf195e3ca9aa411101ec17419 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Fri, 14 Feb 2025 08:24:02 +0100 Subject: [PATCH 55/97] Remove downstream_check (#1318) --- .github/workflows/downstream.yml | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml index 6d5d08ec5..378bfaac2 100644 --- a/.github/workflows/downstream.yml +++ b/.github/workflows/downstream.yml @@ -148,20 +148,3 @@ jobs: run: | cd ${GITHUB_WORKSPACE}/../spyder-kernels xvfb-run --auto-servernum ${pythonLocation}/bin/python -m pytest -x -vv -s --full-trace --color=yes spyder_kernels - - downstream_check: # This job does nothing and is only used for the branch protection - if: always() - needs: - - nbclient - - ipywidgets - - jupyter_client - - ipyparallel - - jupyter_kernel_test - - spyder_kernels - - qtconsole - runs-on: ubuntu-latest - steps: - - name: Decide whether the needed jobs succeeded or failed - uses: re-actors/alls-green@release/v1 - with: - jobs: ${{ toJSON(needs) }} From e6a688d1c13ba9ff9f2c9f8ce5d75264fc988822 Mon Sep 17 00:00:00 2001 From: Min RK Date: Fri, 14 Feb 2025 08:28:10 +0100 Subject: [PATCH 56/97] Remove implicit bind_kernel in `%qtconsole` (#1315) --- ipykernel/zmqshell.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/ipykernel/zmqshell.py b/ipykernel/zmqshell.py index 268d826e2..c5390df34 100644 --- a/ipykernel/zmqshell.py +++ b/ipykernel/zmqshell.py @@ -398,14 +398,6 @@ def qtconsole(self, arg_s): Useful for connecting a qtconsole to running notebooks, for better debugging. 
""" - - # %qtconsole should imply bind_kernel for engines: - # FIXME: move to ipyparallel Kernel subclass - if "ipyparallel" in sys.modules: - from ipyparallel import bind_kernel - - bind_kernel() - try: connect_qtconsole(argv=arg_split(arg_s, os.name == "posix")) except Exception as e: From eb0aee6e732d7a361ec037a739c3cef6c84e091d Mon Sep 17 00:00:00 2001 From: Ian Thomas Date: Fri, 14 Feb 2025 07:31:15 +0000 Subject: [PATCH 57/97] Use supported_features=['debugger'] in kernel info reply (#1296) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ipykernel/kernelbase.py | 15 ++++++++++----- tests/test_debugger.py | 11 +++++++++++ 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index d496e0c91..17d0d6cf8 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -880,18 +880,23 @@ async def connect_request(self, socket, ident, parent): @property def kernel_info(self): - info = { + from .debugger import _is_debugpy_available + + supported_features: list[str] = [] + if self._supports_kernel_subshells: + supported_features.append("kernel subshells") + if _is_debugpy_available: + supported_features.append("debugger") + + return { "protocol_version": kernel_protocol_version, "implementation": self.implementation, "implementation_version": self.implementation_version, "language_info": self.language_info, "banner": self.banner, "help_links": self.help_links, - "supported_features": [], + "supported_features": supported_features, } - if self._supports_kernel_subshells: - info["supported_features"] = ["kernel subshells"] - return info async def kernel_info_request(self, socket, ident, parent): """Handle a kernel info request.""" diff --git a/tests/test_debugger.py b/tests/test_debugger.py index fa64d2d09..4646fb49d 100644 --- a/tests/test_debugger.py +++ b/tests/test_debugger.py @@ -96,6 +96,17 @@ def test_debug_initialize(kernel): assert reply == {} +def 
test_supported_features(kernel_with_debug): + kernel_with_debug.kernel_info() + reply = kernel_with_debug.get_shell_msg(timeout=TIMEOUT) + supported_features = reply["content"]["supported_features"] + + if debugpy: + assert "debugger" in supported_features + else: + assert "debugger" not in supported_features + + def test_attach_debug(kernel_with_debug): reply = wait_for_debug_request( kernel_with_debug, "evaluate", {"expression": "'a' + 'b'", "context": "repl"} From 230349d0b83e8481ee1e43d19cdd1b93df13a1ac Mon Sep 17 00:00:00 2001 From: David Brochart Date: Fri, 14 Feb 2025 08:35:38 +0100 Subject: [PATCH 58/97] Replace BaseThread's add_task with start_soon (#1300) This PR replaces a BaseThread's add_task() method with start_soon(). The new name is less confusing as it's the same as in AnyIO, and it allows to start a task in the thread even after the thread has been started. We also get rid of _IOPubThread, which has no reason to be different than a BaseThread. (from #1291) --- ipykernel/iostream.py | 46 +++++-------------------- ipykernel/kernelbase.py | 11 +++--- ipykernel/subshell_manager.py | 5 +-- ipykernel/thread.py | 63 ++++++++++++++++++++++++++++------- 4 files changed, 70 insertions(+), 55 deletions(-) diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index dfaee56f3..cb4fc0525 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -16,13 +16,15 @@ from binascii import b2a_hex from collections import defaultdict, deque from io import StringIO, TextIOBase -from threading import Event, Thread, local +from threading import local from typing import Any, Callable import zmq -from anyio import create_task_group, run, sleep, to_thread +from anyio import sleep from jupyter_client.session import extract_header +from .thread import BaseThread + # ----------------------------------------------------------------------------- # Globals # ----------------------------------------------------------------------------- @@ -37,38 +39,6 @@ # 
----------------------------------------------------------------------------- -class _IOPubThread(Thread): - """A thread for a IOPub.""" - - def __init__(self, tasks, **kwargs): - """Initialize the thread.""" - super().__init__(name="IOPub", **kwargs) - self._tasks = tasks - self.pydev_do_not_trace = True - self.is_pydev_daemon_thread = True - self.daemon = True - self.__stop = Event() - - def run(self): - """Run the thread.""" - self.name = "IOPub" - run(self._main) - - async def _main(self): - async with create_task_group() as tg: - for task in self._tasks: - tg.start_soon(task) - await to_thread.run_sync(self.__stop.wait) - tg.cancel_scope.cancel() - - def stop(self): - """Stop the thread. - - This method is threadsafe. - """ - self.__stop.set() - - class IOPubThread: """An object for sending IOPub messages in a background thread @@ -111,7 +81,9 @@ def __init__(self, socket, pipe=False): tasks = [self._handle_event, self._run_event_pipe_gc] if pipe: tasks.append(self._handle_pipe_msgs) - self.thread = _IOPubThread(tasks) + self.thread = BaseThread(name="IOPub", daemon=True) + for task in tasks: + self.thread.start_soon(task) def _setup_event_pipe(self): """Create the PULL socket listening for events that should fire in this thread.""" @@ -181,7 +153,7 @@ async def _handle_event(self): event_f = self._events.popleft() event_f() except Exception: - if self.thread.__stop.is_set(): + if self.thread.stopped.is_set(): return raise @@ -215,7 +187,7 @@ async def _handle_pipe_msgs(self): while True: await self._handle_pipe_msg() except Exception: - if self.thread.__stop.is_set(): + if self.thread.stopped.is_set(): return raise diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 17d0d6cf8..6cd80991c 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -16,6 +16,7 @@ import uuid import warnings from datetime import datetime +from functools import partial from signal import SIGINT, SIGTERM, Signals from .thread import CONTROL_THREAD_NAME @@ 
-529,7 +530,7 @@ async def start(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None: self.control_stop = threading.Event() if not self._is_test and self.control_socket is not None: if self.control_thread: - self.control_thread.add_task(self.control_main) + self.control_thread.start_soon(self.control_main) self.control_thread.start() else: tg.start_soon(self.control_main) @@ -544,9 +545,11 @@ async def start(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None: # Assign tasks to and start shell channel thread. manager = self.shell_channel_thread.manager - self.shell_channel_thread.add_task(self.shell_channel_thread_main) - self.shell_channel_thread.add_task(manager.listen_from_control, self.shell_main) - self.shell_channel_thread.add_task(manager.listen_from_subshells) + self.shell_channel_thread.start_soon(self.shell_channel_thread_main) + self.shell_channel_thread.start_soon( + partial(manager.listen_from_control, self.shell_main) + ) + self.shell_channel_thread.start_soon(manager.listen_from_subshells) self.shell_channel_thread.start() else: if not self._is_test and self.shell_socket is not None: diff --git a/ipykernel/subshell_manager.py b/ipykernel/subshell_manager.py index ca037e777..14c4c57c3 100644 --- a/ipykernel/subshell_manager.py +++ b/ipykernel/subshell_manager.py @@ -7,6 +7,7 @@ import typing as t import uuid from dataclasses import dataclass +from functools import partial from threading import Lock, current_thread, main_thread import zmq @@ -186,8 +187,8 @@ async def _create_subshell(self, subshell_task: t.Any) -> str: await self._send_stream.send(subshell_id) address = self._get_inproc_socket_address(subshell_id) - thread.add_task(thread.create_pair_socket, self._context, address) - thread.add_task(subshell_task, subshell_id) + thread.start_soon(partial(thread.create_pair_socket, self._context, address)) + thread.start_soon(partial(subshell_task, subshell_id)) thread.start() return subshell_id diff --git a/ipykernel/thread.py 
b/ipykernel/thread.py index b3b33d320..a66cb2a46 100644 --- a/ipykernel/thread.py +++ b/ipykernel/thread.py @@ -1,10 +1,14 @@ """Base class for threads.""" + from __future__ import annotations -import typing as t +from collections.abc import Awaitable +from queue import Queue from threading import Event, Thread +from typing import Any, Callable from anyio import create_task_group, run, to_thread +from anyio.abc import TaskGroup CONTROL_THREAD_NAME = "Control" SHELL_CHANNEL_THREAD_NAME = "Shell channel" @@ -16,24 +20,58 @@ class BaseThread(Thread): def __init__(self, **kwargs): """Initialize the thread.""" super().__init__(**kwargs) + self.started = Event() + self.stopped = Event() self.pydev_do_not_trace = True self.is_pydev_daemon_thread = True - self.__stop = Event() - self._tasks_and_args: list[tuple[t.Any, t.Any]] = [] + self._tasks: Queue[tuple[str, Callable[[], Awaitable[Any]]] | None] = Queue() + self._result: Queue[Any] = Queue() + self._exception: Exception | None = None + + @property + def exception(self) -> Exception | None: + return self._exception + + @property + def task_group(self) -> TaskGroup: + return self._task_group - def add_task(self, task: t.Any, *args: t.Any) -> None: - # May only add tasks before the thread is started. 
- self._tasks_and_args.append((task, args)) + def start_soon(self, coro: Callable[[], Awaitable[Any]]) -> None: + self._tasks.put(("start_soon", coro)) - def run(self) -> t.Any: + def run_async(self, coro: Callable[[], Awaitable[Any]]) -> Any: + self._tasks.put(("run_async", coro)) + return self._result.get() + + def run_sync(self, func: Callable[..., Any]) -> Any: + self._tasks.put(("run_sync", func)) + return self._result.get() + + def run(self) -> None: """Run the thread.""" - return run(self._main) + try: + run(self._main) + except Exception as exc: + self._exception = exc async def _main(self) -> None: async with create_task_group() as tg: - for task, args in self._tasks_and_args: - tg.start_soon(task, *args) - await to_thread.run_sync(self.__stop.wait) + self._task_group = tg + self.started.set() + while True: + task = await to_thread.run_sync(self._tasks.get) + if task is None: + break + func, arg = task + if func == "start_soon": + tg.start_soon(arg) + elif func == "run_async": + res = await arg + self._result.put(res) + else: # func == "run_sync" + res = arg() + self._result.put(res) + tg.cancel_scope.cancel() def stop(self) -> None: @@ -41,4 +79,5 @@ def stop(self) -> None: This method is threadsafe. """ - self.__stop.set() + self._tasks.put(None) + self.stopped.set() From d11c0322bcbef391339a81f551c454d450f44cb1 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Fri, 14 Feb 2025 08:40:34 +0100 Subject: [PATCH 59/97] Some formatting changes to prepare bumping ruff pre-commit. 
(#1329) --- examples/embedding/inprocess_qtconsole.py | 1 + examples/embedding/inprocess_terminal.py | 1 + examples/embedding/ipkernel_wxapp.py | 1 + hatch_build.py | 1 + ipykernel/__main__.py | 1 + ipykernel/compiler.py | 1 + ipykernel/connect.py | 4 ++-- ipykernel/datapub.py | 3 +-- ipykernel/debugger.py | 1 + ipykernel/embed.py | 3 +-- ipykernel/gui/gtk3embed.py | 3 +-- ipykernel/gui/gtkembed.py | 3 +-- ipykernel/heartbeat.py | 3 +-- ipykernel/inprocess/blocking.py | 3 ++- ipykernel/inprocess/client.py | 1 - ipykernel/inprocess/constants.py | 3 +-- ipykernel/inprocess/socket.py | 2 +- ipykernel/log.py | 1 + ipykernel/shellchannel.py | 1 + ipykernel/trio_runner.py | 1 + tests/test_zmq_shell.py | 2 +- 21 files changed, 22 insertions(+), 18 deletions(-) diff --git a/examples/embedding/inprocess_qtconsole.py b/examples/embedding/inprocess_qtconsole.py index 7a976a319..f256ecfed 100644 --- a/examples/embedding/inprocess_qtconsole.py +++ b/examples/embedding/inprocess_qtconsole.py @@ -1,4 +1,5 @@ """An in-process qt console app.""" + import os import tornado diff --git a/examples/embedding/inprocess_terminal.py b/examples/embedding/inprocess_terminal.py index c951859e8..432991a7b 100644 --- a/examples/embedding/inprocess_terminal.py +++ b/examples/embedding/inprocess_terminal.py @@ -1,4 +1,5 @@ """An in-process terminal example.""" + import os from anyio import run diff --git a/examples/embedding/ipkernel_wxapp.py b/examples/embedding/ipkernel_wxapp.py index f24ed9392..b36fcc312 100755 --- a/examples/embedding/ipkernel_wxapp.py +++ b/examples/embedding/ipkernel_wxapp.py @@ -16,6 +16,7 @@ Ref: Modified from wxPython source code wxPython/samples/simple/simple.py """ + # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- diff --git a/hatch_build.py b/hatch_build.py index 934348050..4dfdd1a22 100644 --- a/hatch_build.py +++ b/hatch_build.py @@ -1,4 +1,5 @@ 
"""A custom hatch build hook for ipykernel.""" + import shutil import sys from pathlib import Path diff --git a/ipykernel/__main__.py b/ipykernel/__main__.py index a1050e32e..59c864405 100644 --- a/ipykernel/__main__.py +++ b/ipykernel/__main__.py @@ -1,4 +1,5 @@ """The cli entry point for ipykernel.""" + if __name__ == "__main__": from ipykernel import kernelapp as app diff --git a/ipykernel/compiler.py b/ipykernel/compiler.py index e42007ed6..6652e08ae 100644 --- a/ipykernel/compiler.py +++ b/ipykernel/compiler.py @@ -1,4 +1,5 @@ """Compiler helpers for the debugger.""" + import os import sys import tempfile diff --git a/ipykernel/connect.py b/ipykernel/connect.py index 117484599..1e1f16cdc 100644 --- a/ipykernel/connect.py +++ b/ipykernel/connect.py @@ -1,5 +1,5 @@ -"""Connection file-related utilities for the kernel -""" +"""Connection file-related utilities for the kernel""" + # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. from __future__ import annotations diff --git a/ipykernel/datapub.py b/ipykernel/datapub.py index cc19696db..b0ab2c7b2 100644 --- a/ipykernel/datapub.py +++ b/ipykernel/datapub.py @@ -1,8 +1,7 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. -"""Publishing native (typically pickled) objects. 
-""" +"""Publishing native (typically pickled) objects.""" import warnings diff --git a/ipykernel/debugger.py b/ipykernel/debugger.py index 780d18015..57804d2db 100644 --- a/ipykernel/debugger.py +++ b/ipykernel/debugger.py @@ -1,4 +1,5 @@ """Debugger implementation for the IPython kernel.""" + import os import re import sys diff --git a/ipykernel/embed.py b/ipykernel/embed.py index ad22e2a1e..8ef9c1848 100644 --- a/ipykernel/embed.py +++ b/ipykernel/embed.py @@ -1,5 +1,4 @@ -"""Simple function for embedding an IPython kernel -""" +"""Simple function for embedding an IPython kernel""" # ----------------------------------------------------------------------------- # Imports # ----------------------------------------------------------------------------- diff --git a/ipykernel/gui/gtk3embed.py b/ipykernel/gui/gtk3embed.py index 3317ecfe4..ab4ec2226 100644 --- a/ipykernel/gui/gtk3embed.py +++ b/ipykernel/gui/gtk3embed.py @@ -1,5 +1,4 @@ -"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support. -""" +"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support.""" # ----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team # diff --git a/ipykernel/gui/gtkembed.py b/ipykernel/gui/gtkembed.py index e87249ead..6f3b6d166 100644 --- a/ipykernel/gui/gtkembed.py +++ b/ipykernel/gui/gtkembed.py @@ -1,5 +1,4 @@ -"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support. -""" +"""GUI support for the IPython ZeroMQ kernel - GTK toolkit support.""" # ----------------------------------------------------------------------------- # Copyright (C) 2010-2011 The IPython Development Team # diff --git a/ipykernel/heartbeat.py b/ipykernel/heartbeat.py index 9816959dd..7706312e1 100644 --- a/ipykernel/heartbeat.py +++ b/ipykernel/heartbeat.py @@ -1,5 +1,4 @@ -"""The client and server for a basic ping-pong style heartbeat. 
-""" +"""The client and server for a basic ping-pong style heartbeat.""" # ----------------------------------------------------------------------------- # Copyright (C) 2008-2011 The IPython Development Team diff --git a/ipykernel/inprocess/blocking.py b/ipykernel/inprocess/blocking.py index b5c421a79..4069d37c2 100644 --- a/ipykernel/inprocess/blocking.py +++ b/ipykernel/inprocess/blocking.py @@ -1,7 +1,8 @@ -""" Implements a fully blocking kernel client. +"""Implements a fully blocking kernel client. Useful for test suites and blocking terminal interfaces. """ + import sys # ----------------------------------------------------------------------------- diff --git a/ipykernel/inprocess/client.py b/ipykernel/inprocess/client.py index 8ca97470f..46fcd42e0 100644 --- a/ipykernel/inprocess/client.py +++ b/ipykernel/inprocess/client.py @@ -11,7 +11,6 @@ # Imports # ----------------------------------------------------------------------------- - from jupyter_client.client import KernelClient from jupyter_client.clientabc import KernelClientABC diff --git a/ipykernel/inprocess/constants.py b/ipykernel/inprocess/constants.py index 6133c757d..16d572083 100644 --- a/ipykernel/inprocess/constants.py +++ b/ipykernel/inprocess/constants.py @@ -1,5 +1,4 @@ -"""Shared constants. -""" +"""Shared constants.""" # Because inprocess communication is not networked, we can use a common Session # key everywhere. This is not just the empty bytestring to avoid tripping diff --git a/ipykernel/inprocess/socket.py b/ipykernel/inprocess/socket.py index 5a2e0008b..05b45687c 100644 --- a/ipykernel/inprocess/socket.py +++ b/ipykernel/inprocess/socket.py @@ -1,4 +1,4 @@ -""" Defines a dummy socket implementing (part of) the zmq.Socket interface. """ +"""Defines a dummy socket implementing (part of) the zmq.Socket interface.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
diff --git a/ipykernel/log.py b/ipykernel/log.py index bbd4c445b..91a4e114e 100644 --- a/ipykernel/log.py +++ b/ipykernel/log.py @@ -1,4 +1,5 @@ """A PUB log handler.""" + import warnings from zmq.log.handlers import PUBHandler diff --git a/ipykernel/shellchannel.py b/ipykernel/shellchannel.py index bc0459c46..10abdb359 100644 --- a/ipykernel/shellchannel.py +++ b/ipykernel/shellchannel.py @@ -1,4 +1,5 @@ """A thread for a shell channel.""" + import zmq.asyncio from .subshell_manager import SubshellManager diff --git a/ipykernel/trio_runner.py b/ipykernel/trio_runner.py index a641feba8..6fb44107b 100644 --- a/ipykernel/trio_runner.py +++ b/ipykernel/trio_runner.py @@ -1,4 +1,5 @@ """A trio loop runner.""" + import builtins import logging import signal diff --git a/tests/test_zmq_shell.py b/tests/test_zmq_shell.py index dfd22dec0..8a8fe042b 100644 --- a/tests/test_zmq_shell.py +++ b/tests/test_zmq_shell.py @@ -1,4 +1,4 @@ -""" Tests for zmq shell / display publisher. """ +"""Tests for zmq shell / display publisher.""" # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
From 539aef4e76bca3b52ef8f572e8422ce888016ecf Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Fri, 14 Feb 2025 08:41:05 +0100 Subject: [PATCH 60/97] Ignore or fix most of the remaining ruff 0.9.6 errors (#1331) --- ipykernel/datapub.py | 2 +- pyproject.toml | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/ipykernel/datapub.py b/ipykernel/datapub.py index b0ab2c7b2..3bd107fef 100644 --- a/ipykernel/datapub.py +++ b/ipykernel/datapub.py @@ -29,7 +29,7 @@ class ZMQDataPublisher(Configurable): """A zmq data publisher.""" - topic = topic = CBytes(b"datapub") + topic = CBytes(b"datapub") session = Instance(Session, allow_none=True) pub_socket = Any(allow_none=True) parent_header = Dict({}) diff --git a/pyproject.toml b/pyproject.toml index 4de39428b..613b8e206 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -277,6 +277,9 @@ ignore = [ "PTH123", # use `X | Y` for type annotations, this does not works for dynamic getting type hints on older python "UP007", + "UP031", # Use format specifiers instead of percent format + "PT023", # Use `@pytest.mark.skip` over `@pytest.mark.skip()` + "PT001", # autofixable: Use `@pytest.fixture` over `@pytest.fixture()` ] unfixable = [ # Don't touch print statements From 779874fd76e81c4f1d6fd66027d77638a42bc9e7 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Mon, 17 Feb 2025 20:12:03 +0100 Subject: [PATCH 61/97] Fix spyder kernel install (#1337) the name of the packages in newer ubuntu is just different. 
--- .github/workflows/downstream.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml index 378bfaac2..4ec9d0252 100644 --- a/.github/workflows/downstream.yml +++ b/.github/workflows/downstream.yml @@ -132,7 +132,7 @@ jobs: - name: Install System Packages run: | sudo apt-get update - sudo apt-get install -y --no-install-recommends libegl1-mesa + sudo apt-get install -y --no-install-recommends libgl1 libglx-mesa0 - name: Install spyder-kernels dependencies shell: bash -l {0} run: | From 6aeb2b285955f8dddb270635e4c2156bcd067c2e Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Mon, 17 Feb 2025 20:15:22 +0100 Subject: [PATCH 62/97] Suggest to make implementations of some function always return awaitable (#1295) Co-authored-by: David Brochart --- ipykernel/kernelbase.py | 52 +++++++++++++++++++++++++++++++++++++++++ pyproject.toml | 4 ++++ 2 files changed, 56 insertions(+) diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 6cd80991c..20dcd7f80 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -58,6 +58,14 @@ from ._version import kernel_protocol_version from .iostream import OutStream +_AWAITABLE_MESSAGE: str = ( + "For consistency across implementations, it is recommended that `{func_name}`" + " either be a coroutine function (`async def`) or return an awaitable object" + " (like an `asyncio.Future`). It might become a requirement in the future." + " Coroutine functions and awaitables have been supported since" + " ipykernel 6.0 (2021). 
{target} does not seem to return an awaitable" +) + def _accepts_parameters(meth, param_names): parameters = inspect.signature(meth).parameters @@ -745,6 +753,12 @@ async def execute_request(self, socket, ident, parent): if inspect.isawaitable(reply_content): reply_content = await reply_content + else: + warnings.warn( + _AWAITABLE_MESSAGE.format(func_name="do_execute", target=self.do_execute), + PendingDeprecationWarning, + stacklevel=1, + ) # Flush output before sending the reply. if sys.stdout is not None: @@ -805,6 +819,12 @@ async def complete_request(self, socket, ident, parent): matches = self.do_complete(code, cursor_pos) if inspect.isawaitable(matches): matches = await matches + else: + warnings.warn( + _AWAITABLE_MESSAGE.format(func_name="do_complete", target=self.do_complete), + PendingDeprecationWarning, + stacklevel=1, + ) matches = json_clean(matches) self.session.send(socket, "complete_reply", matches, parent, ident) @@ -833,6 +853,12 @@ async def inspect_request(self, socket, ident, parent): ) if inspect.isawaitable(reply_content): reply_content = await reply_content + else: + warnings.warn( + _AWAITABLE_MESSAGE.format(func_name="do_inspect", target=self.do_inspect), + PendingDeprecationWarning, + stacklevel=1, + ) # Before we send this object over, we scrub it for JSON usage reply_content = json_clean(reply_content) @@ -852,6 +878,12 @@ async def history_request(self, socket, ident, parent): reply_content = self.do_history(**content) if inspect.isawaitable(reply_content): reply_content = await reply_content + else: + warnings.warn( + _AWAITABLE_MESSAGE.format(func_name="do_history", target=self.do_history), + PendingDeprecationWarning, + stacklevel=1, + ) reply_content = json_clean(reply_content) msg = self.session.send(socket, "history_reply", reply_content, parent, ident) @@ -974,6 +1006,12 @@ async def shutdown_request(self, socket, ident, parent): content = self.do_shutdown(parent["content"]["restart"]) if inspect.isawaitable(content): content 
= await content + else: + warnings.warn( + _AWAITABLE_MESSAGE.format(func_name="do_shutdown", target=self.do_shutdown), + PendingDeprecationWarning, + stacklevel=1, + ) self.session.send(socket, "shutdown_reply", content, parent, ident=ident) # same content, but different msg_id for broadcasting on IOPub self._shutdown_message = self.session.msg("shutdown_reply", content, parent) @@ -998,6 +1036,12 @@ async def is_complete_request(self, socket, ident, parent): reply_content = self.do_is_complete(code) if inspect.isawaitable(reply_content): reply_content = await reply_content + else: + warnings.warn( + _AWAITABLE_MESSAGE.format(func_name="do_is_complete", target=self.do_is_complete), + PendingDeprecationWarning, + stacklevel=1, + ) reply_content = json_clean(reply_content) reply_msg = self.session.send(socket, "is_complete_reply", reply_content, parent, ident) self.log.debug("%s", reply_msg) @@ -1014,6 +1058,14 @@ async def debug_request(self, socket, ident, parent): reply_content = self.do_debug_request(content) if inspect.isawaitable(reply_content): reply_content = await reply_content + else: + warnings.warn( + _AWAITABLE_MESSAGE.format( + func_name="do_debug_request", target=self.do_debug_request + ), + PendingDeprecationWarning, + stacklevel=1, + ) reply_content = json_clean(reply_content) reply_msg = self.session.send(socket, "debug_reply", reply_content, parent, ident) self.log.debug("%s", reply_msg) diff --git a/pyproject.toml b/pyproject.toml index 613b8e206..ef49f0e43 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -190,6 +190,10 @@ filterwarnings= [ # ignore unclosed sqlite in traits "ignore:unclosed database in Date: Tue, 18 Feb 2025 14:24:16 +0100 Subject: [PATCH 63/97] Email is @python.org since 2018 (#1343) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ef49f0e43..656afdedf 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "hatchling.build" 
[project] name = "ipykernel" dynamic = ["version"] -authors = [{name = "IPython Development Team", email = "ipython-dev@scipy.org"}] +authors = [{name = "IPython Development Team", email = "ipython-dev@python.org"}] license = {file = "LICENSE"} readme = "README.md" description = "IPython Kernel for Jupyter" From fa3e23ff4ee5621472b0a9201780a36dd9340eda Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Tue, 18 Feb 2025 14:35:31 +0100 Subject: [PATCH 64/97] Remove test_check job (#1335) --- .github/workflows/ci.yml | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 98deaf12d..090db5ef7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -205,22 +205,3 @@ jobs: - uses: actions/checkout@v4 - uses: jupyterlab/maintainer-tools/.github/actions/base-setup@v1 - uses: jupyterlab/maintainer-tools/.github/actions/check-links@v1 - - tests_check: # This job does nothing and is only used for the branch protection - if: always() - needs: - - coverage - - test_docs - - test_without_debugpy - - test_miniumum_versions - - test_lint - - test_prereleases - - check_release - - link_check - - test_sdist - runs-on: ubuntu-latest - steps: - - name: Decide whether the needed jobs succeeded or failed - uses: re-actors/alls-green@release/v1 - with: - jobs: ${{ toJSON(needs) }} From d9998d2ec7e2deea0794da30bc6d1453e024ae0c Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Tue, 18 Feb 2025 14:37:40 +0100 Subject: [PATCH 65/97] Enable ruff G002 and fix 6 occurrences (#1341) --- ipykernel/kernelapp.py | 10 +++++----- pyproject.toml | 2 -- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index bb847e65e..c0c0628ad 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -339,12 +339,12 @@ def init_sockets(self): self.shell_socket = context.socket(zmq.ROUTER) self.shell_socket.linger = 1000 self.shell_port = 
self._bind_socket(self.shell_socket, self.shell_port) - self.log.debug("shell ROUTER Channel on port: %i" % self.shell_port) + self.log.debug("shell ROUTER Channel on port: %i", self.shell_port) self.stdin_socket = zmq.Context(context).socket(zmq.ROUTER) self.stdin_socket.linger = 1000 self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port) - self.log.debug("stdin ROUTER Channel on port: %i" % self.stdin_port) + self.log.debug("stdin ROUTER Channel on port: %i", self.stdin_port) if hasattr(zmq, "ROUTER_HANDOVER"): # set router-handover to workaround zeromq reconnect problems @@ -360,7 +360,7 @@ def init_control(self, context): self.control_socket = context.socket(zmq.ROUTER) self.control_socket.linger = 1000 self.control_port = self._bind_socket(self.control_socket, self.control_port) - self.log.debug("control ROUTER Channel on port: %i" % self.control_port) + self.log.debug("control ROUTER Channel on port: %i", self.control_port) self.debugpy_socket = context.socket(zmq.STREAM) self.debugpy_socket.linger = 1000 @@ -384,7 +384,7 @@ def init_iopub(self, context): self.iopub_socket = context.socket(zmq.PUB) self.iopub_socket.linger = 1000 self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port) - self.log.debug("iopub PUB Channel on port: %i" % self.iopub_port) + self.log.debug("iopub PUB Channel on port: %i", self.iopub_port) self.configure_tornado_logger() self.iopub_thread = IOPubThread(self.iopub_socket, pipe=True) self.iopub_thread.start() @@ -398,7 +398,7 @@ def init_heartbeat(self): hb_ctx = zmq.Context() self.heartbeat = Heartbeat(hb_ctx, (self.transport, self.ip, self.hb_port)) self.hb_port = self.heartbeat.port - self.log.debug("Heartbeat REP Channel on port: %i" % self.hb_port) + self.log.debug("Heartbeat REP Channel on port: %i", self.hb_port) self.heartbeat.start() def close(self): diff --git a/pyproject.toml b/pyproject.toml index 656afdedf..a7e15cae9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -275,8 +275,6 @@ 
ignore = [ "ARG001", # Unused method argument: "ARG002", - # Logging statement uses `%` - "G002", # `open()` should be replaced by `Path.open()` "PTH123", # use `X | Y` for type annotations, this does not works for dynamic getting type hints on older python From 4d42cb8a7326b6bc07f4e975a15575e932dc2828 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Tue, 18 Feb 2025 14:41:25 +0100 Subject: [PATCH 66/97] Remove unused ignores lints. (#1342) --- pyproject.toml | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a7e15cae9..3672b14fd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -257,18 +257,10 @@ ignore = [ "PLR", # Design related pylint codes # Allow non-abstract empty methods in abstract base classes "B027", - # Use of `assert` detected - "S101", - # Possible hardcoded password - "S105", "S106", - # `print` found - "T201", + "T201", # `print` found # Unnecessary `dict` call (rewrite as a literal) "C408", - # Use `contextlib.suppress(ValueError)` instead of try-except-pass - "SIM105", - # `try`-`except`-`pass` detected - "S110", + "SIM105", # Use `contextlib.suppress(ValueError)` instead of try-except-pass # Mutable class attributes should be annotated with `typing.ClassVar` "RUF012", # Unused function argument: @@ -277,21 +269,11 @@ ignore = [ "ARG002", # `open()` should be replaced by `Path.open()` "PTH123", - # use `X | Y` for type annotations, this does not works for dynamic getting type hints on older python - "UP007", + "UP007", # use `X | Y` for type annotations, this does not works for dynamic getting type hints on older python "UP031", # Use format specifiers instead of percent format "PT023", # Use `@pytest.mark.skip` over `@pytest.mark.skip()` "PT001", # autofixable: Use `@pytest.fixture` over `@pytest.fixture()` ] -unfixable = [ - # Don't touch print statements - "T201", - # Don't touch noqa lines - "RUF100", - # Don't touch imports - "F401", - "F403" -] 
[tool.ruff.lint.per-file-ignores] # B011 Do not call assert False since python -O removes these calls From 10c044c3b0f366c400ea483a505a06d62130ffde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Micha=C5=82=20Krassowski?= <5832902+krassowski@users.noreply.github.com> Date: Wed, 19 Feb 2025 10:38:16 +0000 Subject: [PATCH 67/97] Pin sphinx to resolve docs build failures (#1347) --- pyproject.toml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 3672b14fd..64f00c4e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -44,7 +44,9 @@ Tracker = "https://github.com/ipython/ipykernel/issues" [project.optional-dependencies] docs = [ - "sphinx", + # Sphinx pinned until `sphinx-autodoc-typehints` issue is resolved: + # https://github.com/tox-dev/sphinx-autodoc-typehints/issues/523 + "sphinx<8.2.0", "myst_parser", "pydata_sphinx_theme", "sphinxcontrib_github_alt", From fe96a1bacc8d505d1ccbab94335afe9a3824ca9d Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Wed, 19 Feb 2025 12:04:10 +0100 Subject: [PATCH 68/97] Refine deprecation error messages. (#1334) Knowing since when is useful. --- ipykernel/datapub.py | 4 ++-- ipykernel/log.py | 2 +- ipykernel/pickleutil.py | 2 +- ipykernel/serialize.py | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/ipykernel/datapub.py b/ipykernel/datapub.py index 3bd107fef..e8cc501dc 100644 --- a/ipykernel/datapub.py +++ b/ipykernel/datapub.py @@ -20,7 +20,7 @@ from jupyter_client.session import Session, extract_header warnings.warn( - "ipykernel.datapub is deprecated. It has moved to ipyparallel.datapub", + "ipykernel.datapub is deprecated since ipykernel 4.3.0 (2016). It has moved to ipyparallel.datapub", DeprecationWarning, stacklevel=2, ) @@ -73,7 +73,7 @@ def publish_data(data): The data to be published. Think of it as a namespace. """ warnings.warn( - "ipykernel.datapub is deprecated. 
It has moved to ipyparallel.datapub", + "ipykernel.datapub is deprecated since ipykernel 4.3.0 (2016). It has moved to ipyparallel.datapub", DeprecationWarning, stacklevel=2, ) diff --git a/ipykernel/log.py b/ipykernel/log.py index 91a4e114e..c230065e8 100644 --- a/ipykernel/log.py +++ b/ipykernel/log.py @@ -5,7 +5,7 @@ from zmq.log.handlers import PUBHandler warnings.warn( - "ipykernel.log is deprecated. It has moved to ipyparallel.engine.log", + "ipykernel.log is deprecated since ipykernel 4.3.0 (2016). It has moved to ipyparallel.engine.log", DeprecationWarning, stacklevel=2, ) diff --git a/ipykernel/pickleutil.py b/ipykernel/pickleutil.py index 15fc0e675..5a6bc26fe 100644 --- a/ipykernel/pickleutil.py +++ b/ipykernel/pickleutil.py @@ -20,7 +20,7 @@ from traitlets.utils.importstring import import_item warnings.warn( - "ipykernel.pickleutil is deprecated. It has moved to ipyparallel.", + "ipykernel.pickleutil is deprecated since IPykernel 4.3.0 (2016). It has moved to ipyparallel.", DeprecationWarning, stacklevel=2, ) diff --git a/ipykernel/serialize.py b/ipykernel/serialize.py index 22ba5396e..55247cd67 100644 --- a/ipykernel/serialize.py +++ b/ipykernel/serialize.py @@ -35,7 +35,7 @@ from jupyter_client.session import MAX_BYTES, MAX_ITEMS warnings.warn( - "ipykernel.serialize is deprecated. It has moved to ipyparallel.serialize", + "ipykernel.serialize is deprecated since ipykernel 4.3.0 (2016). 
It has moved to ipyparallel.serialize", DeprecationWarning, stacklevel=2, ) From 486050f888fb0a0abf592d7d343d6d27b75a6fd2 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Wed, 19 Feb 2025 12:05:09 +0100 Subject: [PATCH 69/97] Try to debug non-closed iopub socket (#1345) --- tests/test_connect.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/test_connect.py b/tests/test_connect.py index b4b739d97..c43023870 100644 --- a/tests/test_connect.py +++ b/tests/test_connect.py @@ -19,6 +19,21 @@ from .utils import TemporaryWorkingDirectory + +@pytest.fixture(scope="module", autouse=True) +def _enable_tracemalloc(): + try: + import tracemalloc + except ModuleNotFoundError: + # pypy + tracemalloc = None + if tracemalloc is not None: + tracemalloc.start() + yield + if tracemalloc is not None: + tracemalloc.stop() + + sample_info: dict = { "ip": "1.2.3.4", "transport": "ipc", From 28d096d57e4750ab4a92a69e3c6544140b384729 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Wed, 19 Feb 2025 13:32:55 +0100 Subject: [PATCH 70/97] Delete always skipped test, fix trio test, fix framelocal has not .clear() (#1322) There are few chances we will ever use them again. Also a small refactor and unskip of trio test using trio nursery. 
--- ipykernel/embed.py | 3 ++- ipykernel/ipkernel.py | 15 ++++++++++++-- ipykernel/kernelapp.py | 23 +++++++++++++-------- pyproject.toml | 5 ++--- tests/test_embed_kernel.py | 12 +++++------ tests/test_ipkernel_direct.py | 38 ----------------------------------- tests/test_kernel_direct.py | 29 ++++---------------------- tests/test_kernelapp.py | 24 +++++++++------------- 8 files changed, 52 insertions(+), 97 deletions(-) diff --git a/ipykernel/embed.py b/ipykernel/embed.py index 8ef9c1848..34d32e2ff 100644 --- a/ipykernel/embed.py +++ b/ipykernel/embed.py @@ -48,9 +48,10 @@ def embed_kernel(module=None, local_ns=None, **kwargs): if module is None: module = caller_module if local_ns is None: - local_ns = caller_locals + local_ns = dict(**caller_locals) app.kernel.user_module = module + assert isinstance(local_ns, dict) app.kernel.user_ns = local_ns app.shell.set_completer_frame() # type:ignore[union-attr] app.start() diff --git a/ipykernel/ipkernel.py b/ipykernel/ipkernel.py index 5c4501698..a91c14a5e 100644 --- a/ipykernel/ipkernel.py +++ b/ipykernel/ipkernel.py @@ -18,7 +18,18 @@ from IPython.core import release from IPython.utils.tokenutil import line_at_cursor, token_at_cursor from jupyter_client.session import extract_header -from traitlets import Any, Bool, HasTraits, Instance, List, Type, default, observe, observe_compat +from traitlets import ( + Any, + Bool, + Dict, + HasTraits, + Instance, + List, + Type, + default, + observe, + observe_compat, +) from .comm.comm import BaseComm from .comm.manager import CommManager @@ -92,7 +103,7 @@ def _user_module_changed(self, change): if self.shell is not None: self.shell.user_module = change["new"] - user_ns = Instance("collections.abc.Mapping", allow_none=True) + user_ns = Dict(allow_none=True) @default("user_ns") def _default_user_ns(self): diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index c0c0628ad..72f648254 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -733,7 +733,7 @@ 
def init_pdb(self): pdb.set_trace = debugger.set_trace @catch_config_error - def initialize(self, argv=None): + def initialize(self, argv=None) -> None: """Initialize the application.""" self._init_asyncio_patch() super().initialize(argv) @@ -772,22 +772,29 @@ def initialize(self, argv=None): sys.stdout.flush() sys.stderr.flush() - def start(self) -> None: - """Start the application.""" + async def _start(self) -> None: + """ + Async version of start, when the loop is not controlled by IPykernel + + For example to be used in test suite with @pytest.mark.trio + """ if self.subapp is not None: self.subapp.start() return if self.poller is not None: self.poller.start() + await self.main() + + def start(self) -> None: + """Start the application.""" backend = "trio" if self.trio_loop else "asyncio" - run(self.main, backend=backend) - return + run(self._start, backend=backend) - async def _wait_to_enter_eventloop(self): + async def _wait_to_enter_eventloop(self) -> None: await self.kernel._eventloop_set.wait() await self.kernel.enter_eventloop() - async def main(self): + async def main(self) -> None: async with create_task_group() as tg: tg.start_soon(self._wait_to_enter_eventloop) tg.start_soon(self.kernel.start) @@ -795,7 +802,7 @@ async def main(self): if self.kernel.eventloop: self.kernel._eventloop_set.set() - def stop(self): + def stop(self) -> None: """Stop the kernel, thread-safe.""" self.kernel.stop() diff --git a/pyproject.toml b/pyproject.toml index 64f00c4e9..accdd5ceb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -56,14 +56,13 @@ docs = [ "trio" ] test = [ - "pytest>=7.0,<9", - "pytest-cov", "flaky", "ipyparallel", "pre-commit", + "pytest-cov", "pytest-timeout", + "pytest>=7.0,<9", "trio", - "pytest-asyncio>=0.23.5", ] cov = [ "coverage[toml]", diff --git a/tests/test_embed_kernel.py b/tests/test_embed_kernel.py index 685824071..0c74dd1f0 100644 --- a/tests/test_embed_kernel.py +++ b/tests/test_embed_kernel.py @@ -93,10 +93,10 @@ def 
connection_file_ready(connection_file): @flaky(max_runs=3) def test_embed_kernel_basic(): - """IPython.embed_kernel() is basically functional""" + """ipykernel.embed.embed_kernel() is basically functional""" cmd = "\n".join( [ - "from IPython import embed_kernel", + "from ipykernel.embed import embed_kernel", "def go():", " a=5", ' b="hi there"', @@ -129,10 +129,10 @@ def test_embed_kernel_basic(): @flaky(max_runs=3) def test_embed_kernel_namespace(): - """IPython.embed_kernel() inherits calling namespace""" + """ipykernel.embed.embed_kernel() inherits calling namespace""" cmd = "\n".join( [ - "from IPython import embed_kernel", + "from ipykernel.embed import embed_kernel", "def go():", " a=5", ' b="hi there"', @@ -168,10 +168,10 @@ def test_embed_kernel_namespace(): @flaky(max_runs=3) def test_embed_kernel_reentrant(): - """IPython.embed_kernel() can be called multiple times""" + """ipykernel.embed.embed_kernel() can be called multiple times""" cmd = "\n".join( [ - "from IPython import embed_kernel", + "from ipykernel.embed import embed_kernel", "count = 0", "def go():", " global count", diff --git a/tests/test_ipkernel_direct.py b/tests/test_ipkernel_direct.py index dfd0445cf..3e4819610 100644 --- a/tests/test_ipkernel_direct.py +++ b/tests/test_ipkernel_direct.py @@ -1,6 +1,5 @@ """Test IPythonKernel directly""" -import asyncio import os import pytest @@ -152,49 +151,12 @@ async def test_direct_clear(ipkernel): ipkernel.do_clear() -@pytest.mark.skip("ipykernel._cancel_on_sigint doesn't exist anymore") -async def test_cancel_on_sigint(ipkernel: IPythonKernel) -> None: - future: asyncio.Future = asyncio.Future() - # with ipkernel._cancel_on_sigint(future): - # pass - future.set_result(None) - - async def test_dispatch_debugpy(ipkernel: IPythonKernel) -> None: msg = ipkernel.session.msg("debug_request", {}) msg_list = ipkernel.session.serialize(msg) await ipkernel.receive_debugpy_message(msg_list) -@pytest.mark.skip("Queues don't exist anymore") -async def 
test_start(ipkernel: IPythonKernel) -> None: - shell_future: asyncio.Future = asyncio.Future() - - async def fake_dispatch_queue(): - shell_future.set_result(None) - - ipkernel.dispatch_queue = fake_dispatch_queue # type:ignore - ipkernel.start() - ipkernel.debugpy_stream = None - ipkernel.start() - await ipkernel.process_one(False) - await shell_future - - -@pytest.mark.skip("Queues don't exist anymore") -async def test_start_no_debugpy(ipkernel: IPythonKernel) -> None: - shell_future: asyncio.Future = asyncio.Future() - - async def fake_dispatch_queue(): - shell_future.set_result(None) - - ipkernel.dispatch_queue = fake_dispatch_queue # type:ignore - ipkernel.debugpy_stream = None - ipkernel.start() - - await shell_future - - def test_create_comm(): assert isinstance(_create_comm(), BaseComm) diff --git a/tests/test_kernel_direct.py b/tests/test_kernel_direct.py index ab62404b2..6ff1c1d9c 100644 --- a/tests/test_kernel_direct.py +++ b/tests/test_kernel_direct.py @@ -3,9 +3,7 @@ # Copyright (c) IPython Development Team. # Distributed under the terms of the Modified BSD License. 
-import asyncio import os -import warnings import pytest @@ -108,19 +106,6 @@ async def test_direct_debug_request(kernel): assert reply["header"]["msg_type"] == "debug_reply" -@pytest.mark.skip("Shell streams don't exist anymore") -async def test_deprecated_features(kernel): - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - header = kernel._parent_header - assert isinstance(header, dict) - shell_streams = kernel.shell_streams - assert len(shell_streams) == 1 - assert shell_streams[0] == kernel.shell_stream - warnings.simplefilter("ignore", RuntimeWarning) - kernel.shell_streams = [kernel.shell_stream, kernel.shell_stream] - - async def test_process_control(kernel): from jupyter_client.session import DELIM @@ -142,12 +127,6 @@ async def test_dispatch_shell(kernel): await kernel.process_shell_message(msg) -@pytest.mark.skip("kernelbase.do_one_iteration doesn't exist anymore") -async def test_do_one_iteration(kernel): - kernel.msg_queue = asyncio.Queue() - await kernel.do_one_iteration() - - async def test_publish_debug_event(kernel): kernel._publish_debug_event({}) @@ -160,7 +139,7 @@ async def test_send_interrupt_children(kernel): kernel._send_interrupt_children() -# TODO: this causes deadlock -# async def test_direct_usage_request(kernel): -# reply = await kernel.test_control_message("usage_request", {}) -# assert reply['header']['msg_type'] == 'usage_reply' +@pytest.mark.skip(reason="this causes deadlock") +async def test_direct_usage_request(kernel): + reply = await kernel.test_control_message("usage_request", {}) + assert reply["header"]["msg_type"] == "usage_reply" diff --git a/tests/test_kernelapp.py b/tests/test_kernelapp.py index ec91687f4..750e55b50 100644 --- a/tests/test_kernelapp.py +++ b/tests/test_kernelapp.py @@ -12,11 +12,6 @@ from .conftest import MockKernel from .utils import TemporaryWorkingDirectory -try: - import trio -except ImportError: - trio = None - @pytest.mark.skipif(os.name == "nt", 
reason="requires ipc") def test_init_ipc_socket(): @@ -118,21 +113,22 @@ def test_merge_connection_file(): os.remove(cf) -# FIXME: @pytest.mark.skipif(trio is None, reason="requires trio") -@pytest.mark.skip() -def test_trio_loop(): +@pytest.mark.skip("Something wrong with CI") +@pytest.mark.parametrize("anyio_backend", ["trio"]) +async def test_trio_loop(anyio_backend): + import trio + app = IPKernelApp(trio_loop=True) - def trigger_stop(): - time.sleep(1) + async def trigger_stop(): + await trio.sleep(1) app.stop() - thread = threading.Thread(target=trigger_stop) - thread.start() - app.kernel = MockKernel() app.init_sockets() - app.start() + async with trio.open_nursery() as nursery: + nursery.start_soon(app._start) + nursery.start_soon(trigger_stop) app.cleanup_connection_file() app.kernel.destroy() app.close() From f0d0d91052a3237f4dcf550ec1d662ca759ed1a2 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Wed, 19 Feb 2025 14:12:35 +0100 Subject: [PATCH 71/97] Make our own mock kernel methods async (#1346) --- tests/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/conftest.py b/tests/conftest.py index 2c2665551..22714c6ef 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -168,7 +168,7 @@ def __init__(self, *args, **kwargs): self.shell = MagicMock() super().__init__(*args, **kwargs) - def do_execute( + async def do_execute( self, code, silent, store_history=True, user_expressions=None, allow_stdin=False ): if not silent: From f9cfd11b0751125a584a3545cceac7cf0e9b6651 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Wed, 19 Feb 2025 14:13:01 +0100 Subject: [PATCH 72/97] Bump mypy (#1333) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Michał Krassowski <5832902+krassowski@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- ipykernel/inprocess/blocking.py | 1 + ipykernel/inprocess/client.py | 26 ++++++++++++++++++++------ ipykernel/inprocess/session.py | 
11 ++++++++++- ipykernel/kernelapp.py | 2 +- ipykernel/zmqshell.py | 20 ++++++++++---------- 6 files changed, 44 insertions(+), 20 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 58aa38cea..62514712d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -40,11 +40,10 @@ repos: types_or: [yaml, html, json] - repo: https://github.com/pre-commit/mirrors-mypy - rev: "v1.8.0" + rev: "v1.15.0" hooks: - id: mypy files: ipykernel - stages: [manual] args: ["--install-types", "--non-interactive"] additional_dependencies: [ @@ -52,6 +51,7 @@ repos: "ipython>=8.16.1", "jupyter_client>=8.5", "appnope", + "types-psutil", ] - repo: https://github.com/adamchainz/blacken-docs diff --git a/ipykernel/inprocess/blocking.py b/ipykernel/inprocess/blocking.py index 4069d37c2..4c71aae7f 100644 --- a/ipykernel/inprocess/blocking.py +++ b/ipykernel/inprocess/blocking.py @@ -69,6 +69,7 @@ def call_handlers(self, msg): _raw_input = self.client.kernel._sys_raw_input prompt = msg["content"]["prompt"] print(prompt, end="", file=sys.__stdout__) + assert sys.__stdout__ is not None sys.__stdout__.flush() self.client.input(_raw_input()) diff --git a/ipykernel/inprocess/client.py b/ipykernel/inprocess/client.py index 46fcd42e0..a282feb92 100644 --- a/ipykernel/inprocess/client.py +++ b/ipykernel/inprocess/client.py @@ -11,6 +11,9 @@ # Imports # ----------------------------------------------------------------------------- + +from typing import Any + from jupyter_client.client import KernelClient from jupyter_client.clientabc import KernelClientABC @@ -54,9 +57,9 @@ def _default_blocking_class(self): return BlockingInProcessKernelClient - def get_connection_info(self): + def get_connection_info(self, session: bool = False): """Get the connection info for the client.""" - d = super().get_connection_info() + d = super().get_connection_info(session=session) d["kernel"] = self.kernel # type:ignore[assignment] return d @@ -99,9 +102,18 @@ def 
hb_channel(self): # Methods for sending specific messages # ------------------------------------- - async def execute( - self, code, silent=False, store_history=True, user_expressions=None, allow_stdin=None - ): + # Feb 2025: superclass in jupyter-Client is sync, + # it should likely be made all consistent and push + # jupyter_client async as well + async def execute( # type:ignore [override] + self, + code: str, + silent: bool = False, + store_history: bool = True, + user_expressions: dict[str, Any] | None = None, + allow_stdin: bool | None = None, + stop_on_error: bool = True, + ) -> str: """Execute code on the client.""" if allow_stdin is None: allow_stdin = self.allow_stdin @@ -114,7 +126,9 @@ async def execute( ) msg = self.session.msg("execute_request", content) await self._dispatch_to_kernel(msg) - return msg["header"]["msg_id"] + res = msg["header"]["msg_id"] + assert isinstance(res, str) + return res async def complete(self, code, cursor_pos=None): """Get code completion.""" diff --git a/ipykernel/inprocess/session.py b/ipykernel/inprocess/session.py index 0eaed2c60..390ac9954 100644 --- a/ipykernel/inprocess/session.py +++ b/ipykernel/inprocess/session.py @@ -1,8 +1,17 @@ +from typing import Any + from jupyter_client.session import Session as _Session class Session(_Session): - async def recv(self, socket, copy=True): + # superclass is not async. 
+ async def recv( # type: ignore[override] + self, socket, mode: int = 0, content: bool = True, copy=True + ) -> Any: + """ + mode, content, copy have no effect, but are present for superclass compatibility + + """ return await socket.recv_multipart() def send( diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 72f648254..1b6fec6fb 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -514,7 +514,7 @@ def init_io(self): isinstance(handler, StreamHandler) and (buffer := getattr(handler.stream, "buffer", None)) and (fileno := getattr(buffer, "fileno", None)) - and fileno() == sys.stderr._original_stdstream_fd # type:ignore[attr-defined] + and fileno() == sys.stderr._original_stdstream_fd ): self.log.debug("Seeing logger to stderr, rerouting to raw filedescriptor.") io_wrapper = TextIOWrapper( diff --git a/ipykernel/zmqshell.py b/ipykernel/zmqshell.py index c5390df34..ef682f940 100644 --- a/ipykernel/zmqshell.py +++ b/ipykernel/zmqshell.py @@ -78,12 +78,16 @@ def _hooks(self): self._thread_local.hooks = [] return self._thread_local.hooks - def publish( + # Feb: 2025 IPython has a deprecated, `source` parameter, marked for removal that + # triggers typing errors. 
+ def publish( # type: ignore [override] self, data, metadata=None, + *, transient=None, update=False, + **kwargs, ): """Publish a display-data message @@ -508,7 +512,7 @@ def _update_exit_now(self, change): # Over ZeroMQ, GUI control isn't done with PyOS_InputHook as there is no # interactive input being read; we provide event loop support in ipkernel - def enable_gui(self, gui): + def enable_gui(self, gui=None): """Enable a given guil.""" from .eventloops import enable_gui as real_enable_gui @@ -654,14 +658,10 @@ def set_parent(self, parent): self.display_pub.set_parent(parent) # type:ignore[attr-defined] if hasattr(self, "_data_pub"): self.data_pub.set_parent(parent) - try: - sys.stdout.set_parent(parent) # type:ignore[attr-defined] - except AttributeError: - pass - try: - sys.stderr.set_parent(parent) # type:ignore[attr-defined] - except AttributeError: - pass + if hasattr(sys.stdout, "set_parent"): + sys.stdout.set_parent(parent) + if hasattr(sys.stderr, "set_parent"): + sys.stderr.set_parent(parent) def get_parent(self): """Get the parent header.""" From f1609ea68c6faba24a9428c133951f1de8504b3d Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 20 Feb 2025 08:58:04 +0100 Subject: [PATCH 73/97] Disable 3 failing downstream tests, but keep testing the rest. (#1349) Also remove the -x, because if there are many failures, we want to know that.
--- .github/workflows/downstream.yml | 4 ++-- ipykernel/inprocess/client.py | 6 +++--- tests/test_subshells.py | 1 + 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml index 4ec9d0252..945b47ccb 100644 --- a/.github/workflows/downstream.yml +++ b/.github/workflows/downstream.yml @@ -116,7 +116,7 @@ jobs: shell: bash -l {0} run: | cd ${GITHUB_WORKSPACE}/../qtconsole - xvfb-run --auto-servernum ${pythonLocation}/bin/python -m pytest -x -vv -s --full-trace --color=yes qtconsole + xvfb-run --auto-servernum ${pythonLocation}/bin/python -m pytest -vv -s --full-trace --color=yes -k "not test_execute" qtconsole spyder_kernels: runs-on: ubuntu-latest @@ -147,4 +147,4 @@ jobs: shell: bash -l {0} run: | cd ${GITHUB_WORKSPACE}/../spyder-kernels - xvfb-run --auto-servernum ${pythonLocation}/bin/python -m pytest -x -vv -s --full-trace --color=yes spyder_kernels + xvfb-run --auto-servernum ${pythonLocation}/bin/python -m pytest -vv -s --full-trace --color=yes -k 'not test_interrupt and not test_enter_debug_after_interruption' spyder_kernels diff --git a/ipykernel/inprocess/client.py b/ipykernel/inprocess/client.py index a282feb92..130a0c567 100644 --- a/ipykernel/inprocess/client.py +++ b/ipykernel/inprocess/client.py @@ -12,7 +12,7 @@ # ----------------------------------------------------------------------------- -from typing import Any +from typing import Any, Optional from jupyter_client.client import KernelClient from jupyter_client.clientabc import KernelClientABC @@ -110,8 +110,8 @@ async def execute( # type:ignore [override] code: str, silent: bool = False, store_history: bool = True, - user_expressions: dict[str, Any] | None = None, - allow_stdin: bool | None = None, + user_expressions: Optional[dict[str, Any]] = None, + allow_stdin: Optional[bool] = None, stop_on_error: bool = True, ) -> str: """Execute code on the client.""" diff --git a/tests/test_subshells.py b/tests/test_subshells.py index 
0b1c1f1aa..889df1b2e 100644 --- a/tests/test_subshells.py +++ b/tests/test_subshells.py @@ -218,6 +218,7 @@ def test_run_concurrently_timing(include_main_shell): assert duration < timedelta(seconds=sum(times)) +@pytest.mark.xfail(strict=False, reason="subshell still sometime give different results") def test_execution_count(): with new_kernel() as kc: subshell_id = create_subshell_helper(kc)["subshell_id"] From a1f07b75bfd9aa02afe0d3cc8cb8f8da30d13a84 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 20 Feb 2025 09:01:36 +0100 Subject: [PATCH 74/97] Try to reenable tests from downstream ipywidgets (#1350) --- .github/workflows/downstream.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml index 945b47ccb..8492f0ba4 100644 --- a/.github/workflows/downstream.yml +++ b/.github/workflows/downstream.yml @@ -38,7 +38,7 @@ jobs: uses: jupyterlab/maintainer-tools/.github/actions/downstream-test@v1 with: package_name: ipywidgets - test_command: pytest -vv -raXxs -k \"not deprecation_fa_icons and not tooltip_deprecation and not on_submit_deprecation\" -W default --durations 10 --color=yes + test_command: pytest -vv -raXxs -W default --durations 10 --color=yes jupyter_client: runs-on: ubuntu-latest From 367d3d0ee0a6594b8704db82d0f5d5c3bf862442 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 20 Feb 2025 15:57:00 +0100 Subject: [PATCH 75/97] Don't rerun tests with --lf; it hides failures.
(#1324) --- .github/workflows/ci.yml | 12 ++++++------ .readthedocs.yaml | 2 +- tests/test_eventloop.py | 16 ++++++++++------ 3 files changed, 17 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 090db5ef7..1dcf7e7d6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -49,19 +49,19 @@ jobs: timeout-minutes: 15 if: ${{ !startsWith( matrix.python-version, 'pypy' ) && !startsWith(matrix.os, 'windows') }} run: | - hatch run cov:test --cov-fail-under 50 || hatch run test:test --lf + hatch run cov:test --cov-fail-under 50 - name: Run the tests on pypy timeout-minutes: 15 if: ${{ startsWith( matrix.python-version, 'pypy' ) }} run: | - hatch run test:nowarn || hatch run test:nowarn --lf + hatch run test:nowarn - name: Run the tests on Windows timeout-minutes: 15 if: ${{ startsWith(matrix.os, 'windows') }} run: | - hatch run cov:nowarn || hatch run test:nowarn --lf + hatch run cov:nowarn - name: Check Launcher run: | @@ -144,7 +144,7 @@ jobs: - name: Run the tests timeout-minutes: 15 - run: pytest -W default -vv || pytest --vv -W default --lf + run: pytest -W default -vv test_miniumum_versions: name: Test Minimum Versions @@ -164,7 +164,7 @@ jobs: - name: Run the unit tests run: | - hatch -v run test:nowarn || hatch run test:nowarn --lf + hatch -v run test:nowarn test_prereleases: name: Test Prereleases @@ -179,7 +179,7 @@ jobs: dependency_type: pre - name: Run the tests run: | - hatch run test:nowarn || hatch run test:nowarn --lf + hatch run test:nowarn make_sdist: name: Make SDist diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 7ab2c8bf0..8e17caad0 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -3,7 +3,7 @@ version: 2 build: os: ubuntu-22.04 tools: - python: "3.11" + python: "3.13" sphinx: configuration: docs/conf.py diff --git a/tests/test_eventloop.py b/tests/test_eventloop.py index 57a2ca913..7dc0106c8 100644 --- a/tests/test_eventloop.py +++ b/tests/test_eventloop.py @@ -98,12 
+98,16 @@ def test_cocoa_loop(kernel): loop_cocoa(kernel) -@pytest.mark.skipif( - len(qt_guis_avail) == 0, reason="No viable version of PyQt or PySide installed." -) -def test_qt_enable_gui(kernel, capsys): - gui = qt_guis_avail[0] - +@pytest.mark.parametrize("gui", qt_guis_avail) +def test_qt_enable_gui(gui, kernel, capsys): + if os.getenv("GITHUB_ACTIONS", None) == "true" and gui == "qt5": + pytest.skip("Qt5 and GitHub action crash CPython") + if gui == "qt6" and sys.version_info < (3, 10): + pytest.skip( + "qt6 fails on 3.9 with AttributeError: module 'PySide6.QtPrintSupport' has no attribute 'QApplication'" + ) + if sys.platform == "linux" and gui == "qt6" and os.getenv("GITHUB_ACTIONS", None) == "true": + pytest.skip("qt6 fails on github CI with missing libEGL.so.1") enable_gui(gui, kernel) # We store the `QApplication` instance in the kernel. From ff74a6f905ec4c8e597597dec7a61cd33a72179f Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 20 Feb 2025 15:58:07 +0100 Subject: [PATCH 76/97] Check ignores warnings are still relevant. 
(#1340) --- pyproject.toml | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index accdd5ceb..ef5b6f54e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -166,29 +166,14 @@ timeout = 60 filterwarnings= [ # Fail on warnings "error", - - # Ignore our own warnings - "ignore:The `stream` parameter of `getpass.getpass` will have no effect:UserWarning", - "ignore:has moved to ipyparallel:DeprecationWarning", - # IPython warnings "ignore: `Completer.complete` is pending deprecation since IPython 6.0 and will be replaced by `Completer.completions`:PendingDeprecationWarning", "ignore: backends is deprecated since IPython 8.24, backends are managed in matplotlib and can be externally registered.:DeprecationWarning", "ignore: backend2gui is deprecated since IPython 8.24, backends are managed in matplotlib and can be externally registered.:DeprecationWarning", # Ignore jupyter_client warnings - "ignore:unclosed Date: Thu, 20 Feb 2025 16:07:54 +0100 Subject: [PATCH 77/97] Licence :: * trove classifers are deprecated (#1348) --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index ef5b6f54e..86c4f804b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,6 @@ classifiers = [ "Intended Audience :: Developers", "Intended Audience :: System Administrators", "Intended Audience :: Science/Research", - "License :: OSI Approved :: BSD License", "Programming Language :: Python", "Programming Language :: Python :: 3", ] From 90133b7ac4801fa2c192c86c76982aa3a086896a Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Sat, 22 Feb 2025 19:24:40 +0100 Subject: [PATCH 78/97] Another try at tracking down ResourceWarning with tracemalloc. 
(#1353) --- pyproject.toml | 4 ++++ tests/conftest.py | 41 +++++++++++++++++++++++++++++++++++ tests/test_connect.py | 17 +-------------- tests/test_ipkernel_direct.py | 2 +- 4 files changed, 47 insertions(+), 17 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 86c4f804b..fb3b4d078 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -173,6 +173,10 @@ filterwarnings= [ # Ignore jupyter_client warnings "module:Jupyter is migrating its paths to use standard platformdirs:DeprecationWarning", + # we do not want to raise on resources warnings, or we will not have a chance to + # collect the messages and print the location of the leak + "always::ResourceWarning", + # ignore unclosed sqlite in traits "ignore:unclosed database in Date: Sat, 22 Feb 2025 19:42:37 +0100 Subject: [PATCH 79/97] Move mypy disablinging error codes on a per-file basis (#1338) --- pyproject.toml | 52 +++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 51 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index fb3b4d078..908ac5370 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -131,12 +131,62 @@ build = [ [tool.mypy] files = "ipykernel" strict = true +#disable_error_code = [ "import-not-found"] disable_error_code = ["no-untyped-def", "no-untyped-call", "import-not-found"] enable_error_code = ["ignore-without-code", "redundant-expr", "truthy-bool"] follow_imports = "normal" -pretty = true +#pretty = true warn_unreachable = true +[[tool.mypy.overrides]] +module = [ + "ipykernel._eventloop_macos", + "ipykernel.comm.comm", + "ipykernel.comm.manager", + "ipykernel.compiler", + "ipykernel.connect", + "ipykernel.control", + "ipykernel.datapub", + "ipykernel.debugger", + "ipykernel.displayhook", + "ipykernel.embed", + "ipykernel.eventloops", + "ipykernel.gui.gtk3embed", + "ipykernel.gui.gtkembed", + "ipykernel.heartbeat", + "ipykernel.inprocess.blocking", + "ipykernel.inprocess.channels", + "ipykernel.inprocess.client", + 
"ipykernel.inprocess.ipkernel", + "ipykernel.inprocess.manager", + "ipykernel.inprocess.session", + "ipykernel.inprocess.socket", + "ipykernel.iostream", + "ipykernel.ipkernel", + "ipykernel.jsonutil", + "ipykernel.kernelapp", + "ipykernel.kernelbase", + "ipykernel.log", + "ipykernel.parentpoller", + "ipykernel.pickleutil", + "ipykernel.serialize", + "ipykernel.shellchannel", + "ipykernel.subshell", + "ipykernel.subshell_manager", + "ipykernel.thread", + "ipykernel.trio_runner", + "ipykernel.zmqshell" +] +#check_untyped_defs = false +#disallow_incomplete_defs = false +#disallow_untyped_calls = false +#disallow_untyped_decorators = false +#disallow_untyped_defs = false +#ignore_missing_imports = true +#follow_untyped_imports = false + + + [tool.pytest.ini_options] minversion = "6.0" xfail_strict = true From 4854752dc86e0e823c2a114e7ca68cecb76af442 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Mon, 24 Feb 2025 09:46:46 +0100 Subject: [PATCH 80/97] Fix expected text depending on IPython version. 
(#1354) --- tests/test_start_kernel.py | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/tests/test_start_kernel.py b/tests/test_start_kernel.py index f2a632be0..71f4bdc0a 100644 --- a/tests/test_start_kernel.py +++ b/tests/test_start_kernel.py @@ -14,6 +14,14 @@ @flaky(max_runs=3) def test_ipython_start_kernel_userns(): + import IPython + + if IPython.version_info > (9, 0): # noqa:SIM108 + EXPECTED = "IPythonMainModule" + else: + # not this since https://github.com/ipython/ipython/pull/14754 + EXPECTED = "DummyMod" + cmd = dedent( """ from ipykernel.kernelapp import launch_new_instance @@ -40,7 +48,7 @@ def test_ipython_start_kernel_userns(): content = msg["content"] assert content["found"] text = content["data"]["text/plain"] - assert "DummyMod" in text + assert EXPECTED in text @flaky(max_runs=3) From 38b470a6f0b142d9107e093769be525bf5805358 Mon Sep 17 00:00:00 2001 From: David Brochart Date: Tue, 25 Feb 2025 18:42:30 +0100 Subject: [PATCH 81/97] Test more python versions (#1358) --- .github/workflows/ci.yml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1dcf7e7d6..09836ee33 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,16 +22,14 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest, windows-latest, macos-latest] - python-version: ["3.9", "3.13"] - include: - - os: ubuntu-latest - python-version: "pypy-3.9" - - os: macos-latest - python-version: "3.10" - - os: ubuntu-latest - python-version: "3.11" - - os: ubuntu-latest - python-version: "3.12" + python-version: + - "3.9" + - "3.10" + - "3.11" + - "3.12" + - "3.13" + - "pypy-3.9" + - "pypy-3.10" steps: - name: Checkout uses: actions/checkout@v4 From dd07f5272eaf3c613a284421989985256a18e4ab Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 26 Feb 2025 10:35:33 +0100 Subject: [PATCH 82/97] chore: 
update pre-commit hooks (#1355) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- ipykernel/_version.py | 1 + ipykernel/inprocess/client.py | 1 - ipykernel/ipkernel.py | 6 +++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 62514712d..d36865156 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: trailing-whitespace - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.31.1 + rev: 0.31.2 hooks: - id: check-github-workflows @@ -74,7 +74,7 @@ repos: - id: rst-inline-touching-normal - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.2.0 + rev: v0.9.7 hooks: - id: ruff types_or: [python, jupyter] @@ -83,7 +83,7 @@ repos: types_or: [python, jupyter] - repo: https://github.com/scientific-python/cookie - rev: "2024.01.24" + rev: "2025.01.22" hooks: - id: sp-repo-review additional_dependencies: ["repo-review[cli]"] diff --git a/ipykernel/_version.py b/ipykernel/_version.py index b4c5b1dab..166330638 100644 --- a/ipykernel/_version.py +++ b/ipykernel/_version.py @@ -1,6 +1,7 @@ """ store the current version info of the server. 
""" + from __future__ import annotations import re diff --git a/ipykernel/inprocess/client.py b/ipykernel/inprocess/client.py index 130a0c567..1dc8f81e6 100644 --- a/ipykernel/inprocess/client.py +++ b/ipykernel/inprocess/client.py @@ -11,7 +11,6 @@ # Imports # ----------------------------------------------------------------------------- - from typing import Any, Optional from jupyter_client.client import KernelClient diff --git a/ipykernel/ipkernel.py b/ipykernel/ipkernel.py index a91c14a5e..5ba500198 100644 --- a/ipykernel/ipkernel.py +++ b/ipykernel/ipkernel.py @@ -763,9 +763,9 @@ def run_closure(self: threading.Thread): for stream in [stdout, stderr]: if isinstance(stream, OutStream): if parent == kernel_thread_ident: - stream._thread_to_parent_header[ - self.ident - ] = kernel._new_threads_parent_header + stream._thread_to_parent_header[self.ident] = ( + kernel._new_threads_parent_header + ) else: stream._thread_to_parent[self.ident] = parent _threading_Thread_run(self) From 700c26f29126d2ced5480f0314642b12cff0d493 Mon Sep 17 00:00:00 2001 From: David Brochart Date: Wed, 26 Feb 2025 23:04:40 +0100 Subject: [PATCH 83/97] Ensure test_start_app takes 1s to stop kernel (#1364) --- tests/test_kernelapp.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_kernelapp.py b/tests/test_kernelapp.py index 750e55b50..0f1d04373 100644 --- a/tests/test_kernelapp.py +++ b/tests/test_kernelapp.py @@ -38,9 +38,12 @@ def trigger_stop(): app.stop() thread = threading.Thread(target=trigger_stop) + t0 = time.time() thread.start() app.init_sockets() app.start() + t1 = time.time() + assert t1 - t0 >= 1 app.cleanup_connection_file() app.kernel.destroy() app.close() From 7a18189a8c9f41139fcfee5160f41992657bab9c Mon Sep 17 00:00:00 2001 From: David Brochart Date: Thu, 27 Feb 2025 10:42:40 +0100 Subject: [PATCH 84/97] Make kernelbase._eventloop_set event thread-safe (#1366) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- 
ipykernel/kernelapp.py | 4 ++-- ipykernel/kernelbase.py | 6 ++++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 1b6fec6fb..676d2d46f 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -20,7 +20,7 @@ import zmq import zmq.asyncio -from anyio import create_task_group, run +from anyio import create_task_group, run, to_thread from IPython.core.application import ( # type:ignore[attr-defined] BaseIPythonApplication, base_aliases, @@ -791,7 +791,7 @@ def start(self) -> None: run(self._start, backend=backend) async def _wait_to_enter_eventloop(self) -> None: - await self.kernel._eventloop_set.wait() + await to_thread.run_sync(self.kernel._eventloop_set.wait) await self.kernel.enter_eventloop() async def main(self) -> None: diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 20dcd7f80..08f60e14e 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -36,7 +36,7 @@ import psutil import zmq -from anyio import TASK_STATUS_IGNORED, Event, create_task_group, sleep, to_thread +from anyio import TASK_STATUS_IGNORED, create_task_group, sleep, to_thread from anyio.abc import TaskStatus from IPython.core.error import StdinNotImplementedError from jupyter_client.session import Session @@ -235,12 +235,14 @@ def _parent_header(self): "list_subshell_request", ] - _eventloop_set: Event = Event() + _eventloop_set: threading.Event def __init__(self, **kwargs): """Initialize the kernel.""" super().__init__(**kwargs) + self._eventloop_set = threading.Event() + # Kernel application may swap stdout and stderr to OutStream, # which is the case in `IPKernelApp.init_io`, hence `sys.stdout` # can already by different from TextIO at initialization time. From a0f8352abb285f707c540972d37a6bcbead9da9e Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Fri, 28 Feb 2025 13:58:52 +0100 Subject: [PATCH 85/97] Remove deprecated modules since 4.3 (2016). 
(#1352) --- ipykernel/datapub.py | 83 ------- ipykernel/pickleutil.py | 487 --------------------------------------- ipykernel/serialize.py | 203 ---------------- tests/test_pickleutil.py | 84 ------- 4 files changed, 857 deletions(-) delete mode 100644 ipykernel/datapub.py delete mode 100644 ipykernel/pickleutil.py delete mode 100644 ipykernel/serialize.py delete mode 100644 tests/test_pickleutil.py diff --git a/ipykernel/datapub.py b/ipykernel/datapub.py deleted file mode 100644 index e8cc501dc..000000000 --- a/ipykernel/datapub.py +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -"""Publishing native (typically pickled) objects.""" - -import warnings - -from traitlets import Any, CBytes, Dict, Instance -from traitlets.config import Configurable - -from ipykernel.jsonutil import json_clean - -try: - # available since ipyparallel 5.0.0 - from ipyparallel.serialize import serialize_object -except ImportError: - # Deprecated since ipykernel 4.3.0 - from ipykernel.serialize import serialize_object - -from jupyter_client.session import Session, extract_header - -warnings.warn( - "ipykernel.datapub is deprecated since ipykernel 4.3.0 (2016). It has moved to ipyparallel.datapub", - DeprecationWarning, - stacklevel=2, -) - - -class ZMQDataPublisher(Configurable): - """A zmq data publisher.""" - - topic = CBytes(b"datapub") - session = Instance(Session, allow_none=True) - pub_socket = Any(allow_none=True) - parent_header = Dict({}) - - def set_parent(self, parent): - """Set the parent for outbound messages.""" - self.parent_header = extract_header(parent) - - def publish_data(self, data): - """publish a data_message on the IOPub channel - - Parameters - ---------- - data : dict - The data to be published. Think of it as a namespace. 
- """ - session = self.session - assert session is not None - buffers = serialize_object( - data, - buffer_threshold=session.buffer_threshold, - item_threshold=session.item_threshold, - ) - content = json_clean(dict(keys=list(data.keys()))) - session.send( - self.pub_socket, - "data_message", - content=content, - parent=self.parent_header, - buffers=buffers, - ident=self.topic, - ) - - -def publish_data(data): - """publish a data_message on the IOPub channel - - Parameters - ---------- - data : dict - The data to be published. Think of it as a namespace. - """ - warnings.warn( - "ipykernel.datapub is deprecated since ipykernel 4.3.0 (2016). It has moved to ipyparallel.datapub", - DeprecationWarning, - stacklevel=2, - ) - - from ipykernel.zmqshell import ZMQInteractiveShell - - ZMQInteractiveShell.instance().data_pub.publish_data(data) diff --git a/ipykernel/pickleutil.py b/ipykernel/pickleutil.py deleted file mode 100644 index 5a6bc26fe..000000000 --- a/ipykernel/pickleutil.py +++ /dev/null @@ -1,487 +0,0 @@ -"""Pickle related utilities. Perhaps this should be called 'can'.""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. -from __future__ import annotations - -import copy -import pickle -import sys -import typing -import warnings -from types import FunctionType - -# This registers a hook when it's imported -try: - from ipyparallel.serialize import codeutil # noqa: F401 -except ImportError: - pass -from traitlets.log import get_logger -from traitlets.utils.importstring import import_item - -warnings.warn( - "ipykernel.pickleutil is deprecated since IPykernel 4.3.0 (2016). 
It has moved to ipyparallel.", - DeprecationWarning, - stacklevel=2, -) - -buffer = memoryview -class_type = type - -PICKLE_PROTOCOL = pickle.DEFAULT_PROTOCOL - - -def _get_cell_type(a=None): - """the type of a closure cell doesn't seem to be importable, - so just create one - """ - - def inner(): - return a - - return type(inner.__closure__[0]) # type:ignore[index] - - -cell_type = _get_cell_type() - -# ------------------------------------------------------------------------------- -# Functions -# ------------------------------------------------------------------------------- - - -def interactive(f): - """decorator for making functions appear as interactively defined. - This results in the function being linked to the user_ns as globals() - instead of the module globals(). - """ - - # build new FunctionType, so it can have the right globals - # interactive functions never have closures, that's kind of the point - if isinstance(f, FunctionType): - mainmod = __import__("__main__") - f = FunctionType( - f.__code__, - mainmod.__dict__, - f.__name__, - f.__defaults__, - ) - # associate with __main__ for uncanning - f.__module__ = "__main__" - return f - - -def use_dill(): - """use dill to expand serialization support - - adds support for object methods and closures to serialization. - """ - # import dill causes most of the magic - import dill - - # dill doesn't work with cPickle, - # tell the two relevant modules to use plain pickle - - global pickle # noqa: PLW0603 - pickle = dill - - try: - from ipykernel import serialize - except ImportError: - pass - else: - serialize.pickle = dill # type:ignore[attr-defined] - - # disable special function handling, let dill take care of it - can_map.pop(FunctionType, None) - - -def use_cloudpickle(): - """use cloudpickle to expand serialization support - - adds support for object methods and closures to serialization. 
- """ - import cloudpickle - - global pickle # noqa: PLW0603 - pickle = cloudpickle - - try: - from ipykernel import serialize - except ImportError: - pass - else: - serialize.pickle = cloudpickle # type:ignore[attr-defined] - - # disable special function handling, let cloudpickle take care of it - can_map.pop(FunctionType, None) - - -# ------------------------------------------------------------------------------- -# Classes -# ------------------------------------------------------------------------------- - - -class CannedObject: - """A canned object.""" - - def __init__(self, obj, keys=None, hook=None): - """can an object for safe pickling - - Parameters - ---------- - obj - The object to be canned - keys : list (optional) - list of attribute names that will be explicitly canned / uncanned - hook : callable (optional) - An optional extra callable, - which can do additional processing of the uncanned object. - - Notes - ----- - large data may be offloaded into the buffers list, - used for zero-copy transfers. 
- """ - self.keys = keys or [] - self.obj = copy.copy(obj) - self.hook = can(hook) - for key in keys: - setattr(self.obj, key, can(getattr(obj, key))) - - self.buffers = [] - - def get_object(self, g=None): - """Get an object.""" - if g is None: - g = {} - obj = self.obj - for key in self.keys: - setattr(obj, key, uncan(getattr(obj, key), g)) - - if self.hook: - self.hook = uncan(self.hook, g) - self.hook(obj, g) - return self.obj - - -class Reference(CannedObject): - """object for wrapping a remote reference by name.""" - - def __init__(self, name): - """Initialize the reference.""" - if not isinstance(name, str): - raise TypeError("illegal name: %r" % name) - self.name = name - self.buffers = [] - - def __repr__(self): - """Get the string repr of the reference.""" - return "" % self.name - - def get_object(self, g=None): - """Get an object in the reference.""" - if g is None: - g = {} - - return eval(self.name, g) - - -class CannedCell(CannedObject): - """Can a closure cell""" - - def __init__(self, cell): - """Initialize the canned cell.""" - self.cell_contents = can(cell.cell_contents) - - def get_object(self, g=None): - """Get an object in the cell.""" - cell_contents = uncan(self.cell_contents, g) - - def inner(): - """Inner function.""" - return cell_contents - - return inner.__closure__[0] # type:ignore[index] - - -class CannedFunction(CannedObject): - """Can a function.""" - - def __init__(self, f): - """Initialize the can""" - self._check_type(f) - self.code = f.__code__ - self.defaults: typing.Optional[list[typing.Any]] - if f.__defaults__: - self.defaults = [can(fd) for fd in f.__defaults__] - else: - self.defaults = None - - self.closure: typing.Any - closure = f.__closure__ - if closure: - self.closure = tuple(can(cell) for cell in closure) - else: - self.closure = None - - self.module = f.__module__ or "__main__" - self.__name__ = f.__name__ - self.buffers = [] - - def _check_type(self, obj): - assert isinstance(obj, FunctionType), "Not a function 
type" - - def get_object(self, g=None): - """Get an object out of the can.""" - # try to load function back into its module: - if not self.module.startswith("__"): - __import__(self.module) - g = sys.modules[self.module].__dict__ - - if g is None: - g = {} - defaults = tuple(uncan(cfd, g) for cfd in self.defaults) if self.defaults else None - closure = tuple(uncan(cell, g) for cell in self.closure) if self.closure else None - return FunctionType(self.code, g, self.__name__, defaults, closure) - - -class CannedClass(CannedObject): - """A canned class object.""" - - def __init__(self, cls): - """Initialize the can.""" - self._check_type(cls) - self.name = cls.__name__ - self.old_style = not isinstance(cls, type) - self._canned_dict = {} - for k, v in cls.__dict__.items(): - if k not in ("__weakref__", "__dict__"): - self._canned_dict[k] = can(v) - mro = [] if self.old_style else cls.mro() - - self.parents = [can(c) for c in mro[1:]] - self.buffers = [] - - def _check_type(self, obj): - assert isinstance(obj, class_type), "Not a class type" - - def get_object(self, g=None): - """Get an object from the can.""" - parents = tuple(uncan(p, g) for p in self.parents) - return type(self.name, parents, uncan_dict(self._canned_dict, g=g)) - - -class CannedArray(CannedObject): - """A canned numpy array.""" - - def __init__(self, obj): - """Initialize the can.""" - from numpy import ascontiguousarray - - self.shape = obj.shape - self.dtype = obj.dtype.descr if obj.dtype.fields else obj.dtype.str - self.pickled = False - if sum(obj.shape) == 0: - self.pickled = True - elif obj.dtype == "O": - # can't handle object dtype with buffer approach - self.pickled = True - elif obj.dtype.fields and any(dt == "O" for dt, sz in obj.dtype.fields.values()): - self.pickled = True - if self.pickled: - # just pickle it - self.buffers = [pickle.dumps(obj, PICKLE_PROTOCOL)] - else: - # ensure contiguous - obj = ascontiguousarray(obj, dtype=None) - self.buffers = [buffer(obj)] - - def 
get_object(self, g=None): - """Get the object.""" - from numpy import frombuffer - - data = self.buffers[0] - if self.pickled: - # we just pickled it - return pickle.loads(data) - return frombuffer(data, dtype=self.dtype).reshape(self.shape) - - -class CannedBytes(CannedObject): - """A canned bytes object.""" - - @staticmethod - def wrap(buf: typing.Union[memoryview, bytes, typing.SupportsBytes]) -> bytes: - """Cast a buffer or memoryview object to bytes""" - if isinstance(buf, memoryview): - return buf.tobytes() - if not isinstance(buf, bytes): - return bytes(buf) - return buf - - def __init__(self, obj): - """Initialize the can.""" - self.buffers = [obj] - - def get_object(self, g=None): - """Get the canned object.""" - data = self.buffers[0] - return self.wrap(data) - - -class CannedBuffer(CannedBytes): - """A canned buffer.""" - - wrap = buffer # type:ignore[assignment] - - -class CannedMemoryView(CannedBytes): - """A canned memory view.""" - - wrap = memoryview # type:ignore[assignment] - - -# ------------------------------------------------------------------------------- -# Functions -# ------------------------------------------------------------------------------- - - -def _import_mapping(mapping, original=None): - """import any string-keys in a type mapping""" - log = get_logger() - log.debug("Importing canning map") - for key, _ in list(mapping.items()): - if isinstance(key, str): - try: - cls = import_item(key) - except Exception: - if original and key not in original: - # only message on user-added classes - log.error("canning class not importable: %r", key, exc_info=True) # noqa: G201 - mapping.pop(key) - else: - mapping[cls] = mapping.pop(key) - - -def istype(obj, check): - """like isinstance(obj, check), but strict - - This won't catch subclasses. 
- """ - if isinstance(check, tuple): - return any(type(obj) is cls for cls in check) - return type(obj) is check - - -def can(obj): - """prepare an object for pickling""" - - import_needed = False - - for cls, canner in can_map.items(): - if isinstance(cls, str): - import_needed = True - break - if istype(obj, cls): - return canner(obj) - - if import_needed: - # perform can_map imports, then try again - # this will usually only happen once - _import_mapping(can_map, _original_can_map) - return can(obj) - - return obj - - -def can_class(obj): - """Can a class object.""" - if isinstance(obj, class_type) and obj.__module__ == "__main__": - return CannedClass(obj) - return obj - - -def can_dict(obj): - """can the *values* of a dict""" - if istype(obj, dict): - newobj = {} - for k, v in obj.items(): - newobj[k] = can(v) - return newobj - return obj - - -sequence_types = (list, tuple, set) - - -def can_sequence(obj): - """can the elements of a sequence""" - if istype(obj, sequence_types): - t = type(obj) - return t([can(i) for i in obj]) - return obj - - -def uncan(obj, g=None): - """invert canning""" - - import_needed = False - for cls, uncanner in uncan_map.items(): - if isinstance(cls, str): - import_needed = True - break - if isinstance(obj, cls): - return uncanner(obj, g) - - if import_needed: - # perform uncan_map imports, then try again - # this will usually only happen once - _import_mapping(uncan_map, _original_uncan_map) - return uncan(obj, g) - - return obj - - -def uncan_dict(obj, g=None): - """Uncan a dict object.""" - if istype(obj, dict): - newobj = {} - for k, v in obj.items(): - newobj[k] = uncan(v, g) - return newobj - return obj - - -def uncan_sequence(obj, g=None): - """Uncan a sequence.""" - if istype(obj, sequence_types): - t = type(obj) - return t([uncan(i, g) for i in obj]) - return obj - - -# ------------------------------------------------------------------------------- -# API dictionaries -# 
------------------------------------------------------------------------------- - -# These dicts can be extended for custom serialization of new objects - -can_map = { - "numpy.ndarray": CannedArray, - FunctionType: CannedFunction, - bytes: CannedBytes, - memoryview: CannedMemoryView, - cell_type: CannedCell, - class_type: can_class, -} -if buffer is not memoryview: - can_map[buffer] = CannedBuffer - -uncan_map: dict[type, typing.Any] = { - CannedObject: lambda obj, g: obj.get_object(g), - dict: uncan_dict, -} - -# for use in _import_mapping: -_original_can_map = can_map.copy() -_original_uncan_map = uncan_map.copy() diff --git a/ipykernel/serialize.py b/ipykernel/serialize.py deleted file mode 100644 index 55247cd67..000000000 --- a/ipykernel/serialize.py +++ /dev/null @@ -1,203 +0,0 @@ -"""serialization utilities for apply messages""" - -# Copyright (c) IPython Development Team. -# Distributed under the terms of the Modified BSD License. - -import pickle -import warnings -from itertools import chain - -try: - # available since ipyparallel 5.0.0 - from ipyparallel.serialize.canning import ( - CannedObject, - can, - can_sequence, - istype, - sequence_types, - uncan, - uncan_sequence, - ) - from ipyparallel.serialize.serialize import PICKLE_PROTOCOL -except ImportError: - # Deprecated since ipykernel 4.3.0 - from ipykernel.pickleutil import ( - PICKLE_PROTOCOL, - CannedObject, - can, - can_sequence, - istype, - sequence_types, - uncan, - uncan_sequence, - ) - -from jupyter_client.session import MAX_BYTES, MAX_ITEMS - -warnings.warn( - "ipykernel.serialize is deprecated since ipykernel 4.3.0 (2016). 
It has moved to ipyparallel.serialize", - DeprecationWarning, - stacklevel=2, -) - -# ----------------------------------------------------------------------------- -# Serialization Functions -# ----------------------------------------------------------------------------- - - -def _extract_buffers(obj, threshold=MAX_BYTES): - """extract buffers larger than a certain threshold""" - buffers = [] - if isinstance(obj, CannedObject) and obj.buffers: - for i, buf in enumerate(obj.buffers): - if len(buf) > threshold: - # buffer larger than threshold, prevent pickling - obj.buffers[i] = None - buffers.append(buf) - # buffer too small for separate send, coerce to bytes - # because pickling buffer objects just results in broken pointers - elif isinstance(buf, memoryview): - obj.buffers[i] = buf.tobytes() - return buffers - - -def _restore_buffers(obj, buffers): - """restore buffers extracted by""" - if isinstance(obj, CannedObject) and obj.buffers: - for i, buf in enumerate(obj.buffers): - if buf is None: - obj.buffers[i] = buffers.pop(0) - - -def serialize_object(obj, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS): - """Serialize an object into a list of sendable buffers. - - Parameters - ---------- - obj : object - The object to be serialized - buffer_threshold : int - The threshold (in bytes) for pulling out data buffers - to avoid pickling them. - item_threshold : int - The maximum number of items over which canning will iterate. - Containers (lists, dicts) larger than this will be pickled without - introspection. - - Returns - ------- - [bufs] : list of buffers representing the serialized object. 
- """ - buffers = [] - if istype(obj, sequence_types) and len(obj) < item_threshold: - cobj = can_sequence(obj) - for c in cobj: - buffers.extend(_extract_buffers(c, buffer_threshold)) - elif istype(obj, dict) and len(obj) < item_threshold: - cobj = {} - for k in sorted(obj): - c = can(obj[k]) - buffers.extend(_extract_buffers(c, buffer_threshold)) - cobj[k] = c - else: - cobj = can(obj) - buffers.extend(_extract_buffers(cobj, buffer_threshold)) - - buffers.insert(0, pickle.dumps(cobj, PICKLE_PROTOCOL)) - return buffers - - -def deserialize_object(buffers, g=None): - """reconstruct an object serialized by serialize_object from data buffers. - - Parameters - ---------- - buffers : list of buffers/bytes - g : globals to be used when uncanning - - Returns - ------- - (newobj, bufs) : unpacked object, and the list of remaining unused buffers. - """ - bufs = list(buffers) - pobj = bufs.pop(0) - canned = pickle.loads(pobj) - if istype(canned, sequence_types) and len(canned) < MAX_ITEMS: - for c in canned: - _restore_buffers(c, bufs) - newobj = uncan_sequence(canned, g) - elif istype(canned, dict) and len(canned) < MAX_ITEMS: - newobj = {} - for k in sorted(canned): - c = canned[k] - _restore_buffers(c, bufs) - newobj[k] = uncan(c, g) - else: - _restore_buffers(canned, bufs) - newobj = uncan(canned, g) - - return newobj, bufs - - -def pack_apply_message(f, args, kwargs, buffer_threshold=MAX_BYTES, item_threshold=MAX_ITEMS): - """pack up a function, args, and kwargs to be sent over the wire - - Each element of args/kwargs will be canned for special treatment, - but inspection will not go any deeper than that. 
- - Any object whose data is larger than `threshold` will not have their data copied - (only numpy arrays and bytes/buffers support zero-copy) - - Message will be a list of bytes/buffers of the format: - - [ cf, pinfo, , ] - - With length at least two + len(args) + len(kwargs) - """ - - arg_bufs = list( - chain.from_iterable(serialize_object(arg, buffer_threshold, item_threshold) for arg in args) - ) - - kw_keys = sorted(kwargs.keys()) - kwarg_bufs = list( - chain.from_iterable( - serialize_object(kwargs[key], buffer_threshold, item_threshold) for key in kw_keys - ) - ) - - info = dict(nargs=len(args), narg_bufs=len(arg_bufs), kw_keys=kw_keys) - - msg = [pickle.dumps(can(f), PICKLE_PROTOCOL)] - msg.append(pickle.dumps(info, PICKLE_PROTOCOL)) - msg.extend(arg_bufs) - msg.extend(kwarg_bufs) - - return msg - - -def unpack_apply_message(bufs, g=None, copy=True): - """unpack f,args,kwargs from buffers packed by pack_apply_message() - Returns: original f,args,kwargs""" - bufs = list(bufs) # allow us to pop - assert len(bufs) >= 2, "not enough buffers!" 
- pf = bufs.pop(0) - f = uncan(pickle.loads(pf), g) - pinfo = bufs.pop(0) - info = pickle.loads(pinfo) - arg_bufs, kwarg_bufs = bufs[: info["narg_bufs"]], bufs[info["narg_bufs"] :] - - args_list = [] - for _ in range(info["nargs"]): - arg, arg_bufs = deserialize_object(arg_bufs, g) - args_list.append(arg) - args = tuple(args_list) - assert not arg_bufs, "Shouldn't be any arg bufs left over" - - kwargs = {} - for key in info["kw_keys"]: - kwarg, kwarg_bufs = deserialize_object(kwarg_bufs, g) - kwargs[key] = kwarg - assert not kwarg_bufs, "Shouldn't be any kwarg bufs left over" - - return f, args, kwargs diff --git a/tests/test_pickleutil.py b/tests/test_pickleutil.py deleted file mode 100644 index 2c55a30e4..000000000 --- a/tests/test_pickleutil.py +++ /dev/null @@ -1,84 +0,0 @@ -import pickle -import sys -import warnings - -import pytest - -with warnings.catch_warnings(): - warnings.simplefilter("ignore") - from ipykernel.pickleutil import can, uncan - -if sys.platform.startswith("win"): - pytest.skip("skipping pickle tests on windows", allow_module_level=True) - - -def interactive(f): - f.__module__ = "__main__" - return f - - -def dumps(obj): - return pickle.dumps(can(obj)) - - -def loads(obj): - return uncan(pickle.loads(obj)) - - -def test_no_closure(): - @interactive - def foo(): - a = 5 - return a - - pfoo = dumps(foo) - bar = loads(pfoo) - assert foo() == bar() - - -def test_generator_closure(): - # this only creates a closure on Python 3 - @interactive - def foo(): - i = "i" - r = [i for j in (1, 2)] - return r - - pfoo = dumps(foo) - bar = loads(pfoo) - assert foo() == bar() - - -def test_nested_closure(): - @interactive - def foo(): - i = "i" - - def g(): - return i - - return g() - - pfoo = dumps(foo) - bar = loads(pfoo) - assert foo() == bar() - - -def test_closure(): - i = "i" - - @interactive - def foo(): - return i - - pfoo = dumps(foo) - bar = loads(pfoo) - assert foo() == bar() - - -def test_uncan_bytes_buffer(): - data = b"data" - canned = 
can(data) - canned.buffers = [memoryview(buf) for buf in canned.buffers] - out = uncan(canned) - assert out == data From 3cbc3b688fe7998c88e097ae2db64f9a552c7486 Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Fri, 28 Feb 2025 15:04:04 +0100 Subject: [PATCH 86/97] Remove nose import. (#1368) --- tests/utils.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/tests/utils.py b/tests/utils.py index 5bf98a051..a3a6de298 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -29,12 +29,6 @@ def start_new_kernel(**kwargs): Integrates with our output capturing for tests. """ kwargs["stderr"] = STDOUT - try: - import nose - - kwargs["stdout"] = nose.iptest_stdstreams_fileno() - except (ImportError, AttributeError): - pass return manager.start_new_kernel(startup_timeout=STARTUP_TIMEOUT, **kwargs) @@ -173,12 +167,6 @@ def new_kernel(argv=None): kernel_client: connected KernelClient instance """ kwargs = {"stderr": STDOUT} - try: - import nose - - kwargs["stdout"] = nose.iptest_stdstreams_fileno() - except (ImportError, AttributeError): - pass if argv is not None: kwargs["extra_arguments"] = argv return manager.run_kernel(**kwargs) From dab3b39e3f1e0258d99b189867d8f2e2d36c976e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Mar 2025 09:03:01 +0100 Subject: [PATCH 87/97] chore: update pre-commit hooks (#1372) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d36865156..2547aab2e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -74,7 +74,7 @@ repos: - id: rst-inline-touching-normal - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.7 + rev: v0.9.9 hooks: - id: ruff types_or: [python, jupyter] From dcdc56ea49c5434b41737026e71ce50de68b4abe Mon Sep 17 00:00:00 2001 From: David 
Brochart Date: Thu, 6 Mar 2025 20:37:15 +0100 Subject: [PATCH 88/97] Fix OutStream using _fid before being defined (#1373) --- ipykernel/iostream.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index cb4fc0525..8cec0f42d 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -388,6 +388,9 @@ def _watch_pipe_fd(self): """ + if self._fid is None: + return + try: bts = os.read(self._fid, PIPE_BUFFER_SIZE) while bts and self._should_watch: @@ -441,6 +444,7 @@ def __init__( self.session = session self._has_thread = False self.watch_fd_thread = None + self._fid = None if not isinstance(pub_thread, IOPubThread): # Backward-compat: given socket, not thread. Wrap in a thread. warnings.warn( From cac11a08449d41ceb8a56e3095e78e00cb1626eb Mon Sep 17 00:00:00 2001 From: David Brochart Date: Fri, 7 Mar 2025 09:20:10 +0100 Subject: [PATCH 89/97] Use zmq-anyio (#1291) Co-authored-by: M Bussonnier Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- docs/api/ipykernel.rst | 6 --- ipykernel/debugger.py | 4 +- ipykernel/heartbeat.py | 12 +++-- ipykernel/inprocess/ipkernel.py | 4 +- ipykernel/inprocess/session.py | 2 +- ipykernel/inprocess/socket.py | 6 ++- ipykernel/iostream.py | 76 +++++++++++++------------- ipykernel/ipkernel.py | 9 ++-- ipykernel/kernelapp.py | 74 +++++++++---------------- ipykernel/kernelbase.py | 87 +++++++++++++++++------------- ipykernel/shellchannel.py | 10 +++- ipykernel/subshell.py | 16 ++++-- ipykernel/subshell_manager.py | 96 +++++++++++++++++++++++---------- ipykernel/trio_runner.py | 72 ------------------------- pyproject.toml | 13 +++-- tests/conftest.py | 57 ++++++++++---------- tests/test_async.py | 15 +++--- tests/test_embed_kernel.py | 20 +++++-- tests/test_eventloop.py | 1 + tests/test_io.py | 67 +++++++++++++---------- tests/test_kernelapp.py | 2 +- tests/test_start_kernel.py | 15 ++++-- tests/test_zmq_shell.py | 81 
+++++++++++++++------------- 23 files changed, 381 insertions(+), 364 deletions(-) delete mode 100644 ipykernel/trio_runner.py diff --git a/docs/api/ipykernel.rst b/docs/api/ipykernel.rst index dd46d0842..27b25c893 100644 --- a/docs/api/ipykernel.rst +++ b/docs/api/ipykernel.rst @@ -134,12 +134,6 @@ Submodules :show-inheritance: -.. automodule:: ipykernel.trio_runner - :members: - :undoc-members: - :show-inheritance: - - .. automodule:: ipykernel.zmqshell :members: :undoc-members: diff --git a/ipykernel/debugger.py b/ipykernel/debugger.py index 57804d2db..bb8f338ac 100644 --- a/ipykernel/debugger.py +++ b/ipykernel/debugger.py @@ -242,7 +242,7 @@ async def _send_request(self, msg): self.log.debug("DEBUGPYCLIENT:") self.log.debug(self.routing_id) self.log.debug(buf) - await self.debugpy_socket.send_multipart((self.routing_id, buf)) + await self.debugpy_socket.asend_multipart((self.routing_id, buf)).wait() async def _wait_for_response(self): # Since events are never pushed to the message_queue @@ -438,7 +438,7 @@ async def start(self): (self.shell_socket.getsockopt(ROUTING_ID)), ) - msg = await self.shell_socket.recv_multipart() + msg = await self.shell_socket.arecv_multipart().wait() ident, msg = self.session.feed_identities(msg, copy=True) try: msg = self.session.deserialize(msg, content=True, copy=True) diff --git a/ipykernel/heartbeat.py b/ipykernel/heartbeat.py index 7706312e1..1340ccfc6 100644 --- a/ipykernel/heartbeat.py +++ b/ipykernel/heartbeat.py @@ -92,13 +92,17 @@ def _bind_socket(self): def run(self): """Run the heartbeat thread.""" self.name = "Heartbeat" - self.socket = self.context.socket(zmq.ROUTER) - self.socket.linger = 1000 + try: + self.socket = self.context.socket(zmq.ROUTER) + self.socket.linger = 1000 self._bind_socket() except Exception: - self.socket.close() - raise + try: + self.socket.close() + except Exception: + pass + return while True: try: diff --git a/ipykernel/inprocess/ipkernel.py b/ipykernel/inprocess/ipkernel.py index 
c6f8c6128..efaa594bd 100644 --- a/ipykernel/inprocess/ipkernel.py +++ b/ipykernel/inprocess/ipkernel.py @@ -54,7 +54,7 @@ class InProcessKernel(IPythonKernel): _underlying_iopub_socket = Instance(DummySocket, (False,)) iopub_thread: IOPubThread = Instance(IOPubThread) # type:ignore[assignment] - shell_socket = Instance(DummySocket, (True,)) # type:ignore[arg-type] + shell_socket = Instance(DummySocket, (True,)) @default("iopub_thread") def _default_iopub_thread(self): @@ -207,7 +207,7 @@ def enable_pylab(self, gui=None, import_all=True, welcome_message=False): """Activate pylab support at runtime.""" if not gui: gui = self.kernel.gui - return super().enable_pylab(gui, import_all, welcome_message) + return super().enable_pylab(gui, import_all, welcome_message) # type: ignore[call-arg] InteractiveShellABC.register(InProcessInteractiveShell) diff --git a/ipykernel/inprocess/session.py b/ipykernel/inprocess/session.py index 390ac9954..f8634a36d 100644 --- a/ipykernel/inprocess/session.py +++ b/ipykernel/inprocess/session.py @@ -12,7 +12,7 @@ async def recv( # type: ignore[override] mode, content, copy have no effect, but are present for superclass compatibility """ - return await socket.recv_multipart() + return await socket.arecv_multipart().wait() def send( self, diff --git a/ipykernel/inprocess/socket.py b/ipykernel/inprocess/socket.py index 05b45687c..10204c97c 100644 --- a/ipykernel/inprocess/socket.py +++ b/ipykernel/inprocess/socket.py @@ -65,4 +65,8 @@ async def poll(self, timeout=0): return statistics.current_buffer_used != 0 def close(self): - pass + if self.is_shell: + self.in_send_stream.close() + self.in_receive_stream.close() + self.out_send_stream.close() + self.out_receive_stream.close() diff --git a/ipykernel/iostream.py b/ipykernel/iostream.py index 8cec0f42d..b98cbd988 100644 --- a/ipykernel/iostream.py +++ b/ipykernel/iostream.py @@ -20,6 +20,7 @@ from typing import Any, Callable import zmq +import zmq_anyio from anyio import sleep from 
jupyter_client.session import extract_header @@ -48,7 +49,7 @@ class IOPubThread: whose IO is always run in a thread. """ - def __init__(self, socket, pipe=False): + def __init__(self, socket: zmq_anyio.Socket, pipe: bool = False): """Create IOPub thread Parameters @@ -61,10 +62,7 @@ def __init__(self, socket, pipe=False): """ # ensure all of our sockets as sync zmq.Sockets # don't create async wrappers until we are within the appropriate coroutines - self.socket: zmq.Socket[bytes] | None = zmq.Socket(socket) - if self.socket.context is None: - # bug in pyzmq, shadow socket doesn't always inherit context attribute - self.socket.context = socket.context # type:ignore[unreachable] + self.socket: zmq_anyio.Socket = socket self._context = socket.context self.background_socket = BackgroundSocket(self) @@ -78,7 +76,7 @@ def __init__(self, socket, pipe=False): self._event_pipe_gc_lock: threading.Lock = threading.Lock() self._event_pipe_gc_seconds: float = 10 self._setup_event_pipe() - tasks = [self._handle_event, self._run_event_pipe_gc] + tasks = [self._handle_event, self._run_event_pipe_gc, self.socket.start] if pipe: tasks.append(self._handle_pipe_msgs) self.thread = BaseThread(name="IOPub", daemon=True) @@ -87,7 +85,7 @@ def __init__(self, socket, pipe=False): def _setup_event_pipe(self): """Create the PULL socket listening for events that should fire in this thread.""" - self._pipe_in0 = self._context.socket(zmq.PULL, socket_class=zmq.Socket) + self._pipe_in0 = self._context.socket(zmq.PULL) self._pipe_in0.linger = 0 _uuid = b2a_hex(os.urandom(16)).decode("ascii") @@ -99,11 +97,11 @@ async def _run_event_pipe_gc(self): while True: await sleep(self._event_pipe_gc_seconds) try: - await self._event_pipe_gc() + self._event_pipe_gc() except Exception as e: print(f"Exception in IOPubThread._event_pipe_gc: {e}", file=sys.__stderr__) - async def _event_pipe_gc(self): + def _event_pipe_gc(self): """run a single garbage collection on event pipes""" if not self._event_pipes: # 
don't acquire the lock if there's nothing to do @@ -122,7 +120,7 @@ def _event_pipe(self): except AttributeError: # new thread, new event pipe # create sync base socket - event_pipe = self._context.socket(zmq.PUSH, socket_class=zmq.Socket) + event_pipe = self._context.socket(zmq.PUSH) event_pipe.linger = 0 event_pipe.connect(self._event_interface) self._local.event_pipe = event_pipe @@ -141,30 +139,28 @@ async def _handle_event(self): Whenever *an* event arrives on the event stream, *all* waiting events are processed in order. """ - # create async wrapper within coroutine - pipe_in = zmq.asyncio.Socket(self._pipe_in0) - try: - while True: - await pipe_in.recv() - # freeze event count so new writes don't extend the queue - # while we are processing - n_events = len(self._events) - for _ in range(n_events): - event_f = self._events.popleft() - event_f() - except Exception: - if self.thread.stopped.is_set(): - return - raise + pipe_in = zmq_anyio.Socket(self._pipe_in0) + async with pipe_in: + try: + while True: + await pipe_in.arecv().wait() + # freeze event count so new writes don't extend the queue + # while we are processing + n_events = len(self._events) + for _ in range(n_events): + event_f = self._events.popleft() + event_f() + except Exception: + if self.thread.stopped.is_set(): + return + raise def _setup_pipe_in(self): """setup listening pipe for IOPub from forked subprocesses""" - ctx = self._context - # use UUID to authenticate pipe messages self._pipe_uuid = os.urandom(16) - self._pipe_in1 = ctx.socket(zmq.PULL, socket_class=zmq.Socket) + self._pipe_in1 = zmq_anyio.Socket(self._context.socket(zmq.PULL)) self._pipe_in1.linger = 0 try: @@ -181,19 +177,18 @@ def _setup_pipe_in(self): async def _handle_pipe_msgs(self): """handle pipe messages from a subprocess""" - # create async wrapper within coroutine - self._async_pipe_in1 = zmq.asyncio.Socket(self._pipe_in1) - try: - while True: - await self._handle_pipe_msg() - except Exception: - if 
self.thread.stopped.is_set(): - return - raise + async with self._pipe_in1: + try: + while True: + await self._handle_pipe_msg() + except Exception: + if self.thread.stopped.is_set(): + return + raise async def _handle_pipe_msg(self, msg=None): """handle a pipe message from a subprocess""" - msg = msg or await self._async_pipe_in1.recv_multipart() + msg = msg or await self._pipe_in1.arecv_multipart().wait() if not self._pipe_flag or not self._is_main_process(): return if msg[0] != self._pipe_uuid: @@ -246,7 +241,10 @@ def close(self): """Close the IOPub thread.""" if self.closed: return - self._pipe_in0.close() + try: + self._pipe_in0.close() + except Exception: + pass if self._pipe_flag: self._pipe_in1.close() if self.socket is not None: diff --git a/ipykernel/ipkernel.py b/ipykernel/ipkernel.py index 5ba500198..b170d55e7 100644 --- a/ipykernel/ipkernel.py +++ b/ipykernel/ipkernel.py @@ -12,7 +12,7 @@ from dataclasses import dataclass import comm -import zmq.asyncio +import zmq_anyio from anyio import TASK_STATUS_IGNORED, create_task_group, to_thread from anyio.abc import TaskStatus from IPython.core import release @@ -93,7 +93,7 @@ class IPythonKernel(KernelBase): help="Set this flag to False to deactivate the use of experimental IPython completion APIs.", ).tag(config=True) - debugpy_socket = Instance(zmq.asyncio.Socket, allow_none=True) + debugpy_socket = Instance(zmq_anyio.Socket, allow_none=True) user_module = Any() @@ -229,7 +229,8 @@ def __init__(self, **kwargs): } async def process_debugpy(self): - async with create_task_group() as tg: + assert self.debugpy_socket is not None + async with self.debug_shell_socket, self.debugpy_socket, create_task_group() as tg: tg.start_soon(self.receive_debugpy_messages) tg.start_soon(self.poll_stopped_queue) await to_thread.run_sync(self.debugpy_stop.wait) @@ -252,7 +253,7 @@ async def receive_debugpy_message(self, msg=None): if msg is None: assert self.debugpy_socket is not None - msg = await 
self.debugpy_socket.recv_multipart() + msg = await self.debugpy_socket.arecv_multipart().wait() # The first frame is the socket id, we can drop it frame = msg[1].decode("utf-8") self.log.debug("Debugpy received: %s", frame) diff --git a/ipykernel/kernelapp.py b/ipykernel/kernelapp.py index 676d2d46f..8078f97ce 100644 --- a/ipykernel/kernelapp.py +++ b/ipykernel/kernelapp.py @@ -19,7 +19,7 @@ from typing import Optional import zmq -import zmq.asyncio +import zmq_anyio from anyio import create_task_group, run, to_thread from IPython.core.application import ( # type:ignore[attr-defined] BaseIPythonApplication, @@ -333,15 +333,15 @@ def init_sockets(self): """Create a context, a session, and the kernel sockets.""" self.log.info("Starting the kernel at pid: %i", os.getpid()) assert self.context is None, "init_sockets cannot be called twice!" - self.context = context = zmq.asyncio.Context() + self.context = context = zmq.Context() atexit.register(self.close) - self.shell_socket = context.socket(zmq.ROUTER) + self.shell_socket = zmq_anyio.Socket(context.socket(zmq.ROUTER)) self.shell_socket.linger = 1000 self.shell_port = self._bind_socket(self.shell_socket, self.shell_port) self.log.debug("shell ROUTER Channel on port: %i", self.shell_port) - self.stdin_socket = zmq.Context(context).socket(zmq.ROUTER) + self.stdin_socket = context.socket(zmq.ROUTER) self.stdin_socket.linger = 1000 self.stdin_port = self._bind_socket(self.stdin_socket, self.stdin_port) self.log.debug("stdin ROUTER Channel on port: %i", self.stdin_port) @@ -357,18 +357,19 @@ def init_sockets(self): def init_control(self, context): """Initialize the control channel.""" - self.control_socket = context.socket(zmq.ROUTER) + self.control_socket = zmq_anyio.Socket(context.socket(zmq.ROUTER)) self.control_socket.linger = 1000 self.control_port = self._bind_socket(self.control_socket, self.control_port) self.log.debug("control ROUTER Channel on port: %i", self.control_port) - self.debugpy_socket = 
context.socket(zmq.STREAM) + self.debugpy_socket = zmq_anyio.Socket(context.socket(zmq.STREAM)) self.debugpy_socket.linger = 1000 - self.debug_shell_socket = context.socket(zmq.DEALER) + self.debug_shell_socket = zmq_anyio.Socket(context.socket(zmq.DEALER)) self.debug_shell_socket.linger = 1000 - if self.shell_socket.getsockopt(zmq.LAST_ENDPOINT): - self.debug_shell_socket.connect(self.shell_socket.getsockopt(zmq.LAST_ENDPOINT)) + last_endpoint = self.shell_socket.getsockopt(zmq.LAST_ENDPOINT) + if last_endpoint: + self.debug_shell_socket.connect(last_endpoint) if hasattr(zmq, "ROUTER_HANDOVER"): # set router-handover to workaround zeromq reconnect problems @@ -381,7 +382,7 @@ def init_control(self, context): def init_iopub(self, context): """Initialize the iopub channel.""" - self.iopub_socket = context.socket(zmq.PUB) + self.iopub_socket = zmq_anyio.Socket(context.socket(zmq.PUB)) self.iopub_socket.linger = 1000 self.iopub_port = self._bind_socket(self.iopub_socket, self.iopub_port) self.log.debug("iopub PUB Channel on port: %i", self.iopub_port) @@ -679,43 +680,6 @@ def configure_tornado_logger(self): handler.setFormatter(formatter) logger.addHandler(handler) - def _init_asyncio_patch(self): - """set default asyncio policy to be compatible with tornado - - Tornado 6 (at least) is not compatible with the default - asyncio implementation on Windows - - Pick the older SelectorEventLoopPolicy on Windows - if the known-incompatible default policy is in use. - - Support for Proactor via a background thread is available in tornado 6.1, - but it is still preferable to run the Selector in the main thread - instead of the background. - - do this as early as possible to make it a low priority and overridable - - ref: https://github.com/tornadoweb/tornado/issues/2608 - - FIXME: if/when tornado supports the defaults in asyncio without threads, - remove and bump tornado requirement for py38. 
- Most likely, this will mean a new Python version - where asyncio.ProactorEventLoop supports add_reader and friends. - - """ - if sys.platform.startswith("win"): - import asyncio - - try: - from asyncio import WindowsProactorEventLoopPolicy, WindowsSelectorEventLoopPolicy - except ImportError: - pass - # not affected - else: - if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: - # WindowsProactorEventLoopPolicy is not compatible with tornado 6 - # fallback to the pre-3.8 default of Selector - asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) - def init_pdb(self): """Replace pdb with IPython's version that is interruptible. @@ -735,7 +699,6 @@ def init_pdb(self): @catch_config_error def initialize(self, argv=None) -> None: """Initialize the application.""" - self._init_asyncio_patch() super().initialize(argv) if self.subapp is not None: return @@ -772,7 +735,7 @@ def initialize(self, argv=None) -> None: sys.stdout.flush() sys.stderr.flush() - async def _start(self) -> None: + async def _start(self, backend: str) -> None: """ Async version of start, when the loop is not controlled by IPykernel @@ -783,12 +746,23 @@ async def _start(self) -> None: return if self.poller is not None: self.poller.start() + + if backend == "asyncio" and sys.platform == "win32": + import asyncio + + policy = asyncio.get_event_loop_policy() + if policy.__class__.__name__ == "WindowsProactorEventLoopPolicy": + from anyio._core._asyncio_selector_thread import get_selector + + selector = get_selector() + selector._thread.pydev_do_not_trace = True + await self.main() def start(self) -> None: """Start the application.""" backend = "trio" if self.trio_loop else "asyncio" - run(self._start, backend=backend) + run(partial(self._start, backend), backend=backend) async def _wait_to_enter_eventloop(self) -> None: await to_thread.run_sync(self.kernel._eventloop_set.wait) diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 08f60e14e..6465751c2 
100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -36,6 +36,7 @@ import psutil import zmq +import zmq_anyio from anyio import TASK_STATUS_IGNORED, create_task_group, sleep, to_thread from anyio.abc import TaskStatus from IPython.core.error import StdinNotImplementedError @@ -97,7 +98,7 @@ class Kernel(SingletonConfigurable): session = Instance(Session, allow_none=True) profile_dir = Instance("IPython.core.profiledir.ProfileDir", allow_none=True) - shell_socket = Instance(zmq.asyncio.Socket, allow_none=True) + shell_socket = Instance(zmq_anyio.Socket, allow_none=True) implementation: str implementation_version: str @@ -105,7 +106,7 @@ class Kernel(SingletonConfigurable): _is_test = Bool(False) - control_socket = Instance(zmq.asyncio.Socket, allow_none=True) + control_socket = Instance(zmq_anyio.Socket, allow_none=True) control_tasks: t.Any = List() debug_shell_socket = Any() @@ -278,7 +279,7 @@ async def process_control_message(self, msg=None): assert self.session is not None assert self.control_thread is None or threading.current_thread() == self.control_thread - msg = msg or await self.control_socket.recv_multipart() + msg = msg or await self.control_socket.arecv_multipart().wait() idents, msg = self.session.feed_identities(msg) try: msg = self.session.deserialize(msg, content=True) @@ -375,26 +376,31 @@ async def shell_channel_thread_main(self): assert self.shell_channel_thread is not None assert threading.current_thread() == self.shell_channel_thread - try: - while True: - msg = await self.shell_socket.recv_multipart(copy=False) - # deserialize only the header to get subshell_id - # Keep original message to send to subshell_id unmodified. - _, msg2 = self.session.feed_identities(msg, copy=False) - try: - msg3 = self.session.deserialize(msg2, content=False, copy=False) - subshell_id = msg3["header"].get("subshell_id") - - # Find inproc pair socket to use to send message to correct subshell. 
- socket = self.shell_channel_thread.manager.get_shell_channel_socket(subshell_id) - assert socket is not None - socket.send_multipart(msg, copy=False) - except Exception: - self.log.error("Invalid message", exc_info=True) # noqa: G201 - except BaseException: - if self.shell_stop.is_set(): - return - raise + async with self.shell_socket, create_task_group() as tg: + try: + while True: + msg = await self.shell_socket.arecv_multipart(copy=False).wait() + # deserialize only the header to get subshell_id + # Keep original message to send to subshell_id unmodified. + _, msg2 = self.session.feed_identities(msg, copy=False) + try: + msg3 = self.session.deserialize(msg2, content=False, copy=False) + subshell_id = msg3["header"].get("subshell_id") + + # Find inproc pair socket to use to send message to correct subshell. + socket = self.shell_channel_thread.manager.get_shell_channel_socket( + subshell_id + ) + assert socket is not None + if not socket.started.is_set(): + await tg.start(socket.start) + await socket.asend_multipart(msg, copy=False).wait() + except Exception: + self.log.error("Invalid message", exc_info=True) # noqa: G201 + except BaseException: + if self.shell_stop.is_set(): + return + raise async def shell_main(self, subshell_id: str | None): """Main loop for a single subshell.""" @@ -414,6 +420,8 @@ async def shell_main(self, subshell_id: str | None): socket = None async with create_task_group() as tg: + if not socket.started.is_set(): + await tg.start(socket.start) tg.start_soon(self.process_shell, socket) if subshell_id is None: # Main subshell. 
@@ -446,8 +454,8 @@ async def process_shell_message(self, msg=None, socket=None): assert socket is None socket = self.shell_socket - no_msg = msg is None if self._is_test else not await socket.poll(0) - msg = msg or await socket.recv_multipart(copy=False) + no_msg = msg is None if self._is_test else not await socket.apoll(0).wait() + msg = msg or await socket.arecv_multipart(copy=False).wait() received_time = time.monotonic() copy = not isinstance(msg[0], zmq.Message) @@ -520,7 +528,8 @@ async def process_shell_message(self, msg=None, socket=None): self._publish_status("idle", "shell") async def control_main(self): - async with create_task_group() as tg: + assert self.control_socket is not None + async with self.control_socket, create_task_group() as tg: for task in self.control_tasks: tg.start_soon(task) tg.start_soon(self.process_control) @@ -557,7 +566,7 @@ async def start(self, *, task_status: TaskStatus = TASK_STATUS_IGNORED) -> None: manager = self.shell_channel_thread.manager self.shell_channel_thread.start_soon(self.shell_channel_thread_main) self.shell_channel_thread.start_soon( - partial(manager.listen_from_control, self.shell_main) + partial(manager.listen_from_control, self.shell_main, self.shell_channel_thread) ) self.shell_channel_thread.start_soon(manager.listen_from_subshells) self.shell_channel_thread.start() @@ -1137,9 +1146,11 @@ async def create_subshell_request(self, socket, ident, parent) -> None: # This should only be called in the control thread if it exists. # Request is passed to shell channel thread to process. 
- other_socket = self.shell_channel_thread.manager.get_control_other_socket() - await other_socket.send_json({"type": "create"}) - reply = await other_socket.recv_json() + other_socket = await self.shell_channel_thread.manager.get_control_other_socket( + self.control_thread + ) + await other_socket.asend_json({"type": "create"}).wait() + reply = await other_socket.arecv_json().wait() self.session.send(socket, "create_subshell_reply", reply, parent, ident) @@ -1159,9 +1170,11 @@ async def delete_subshell_request(self, socket, ident, parent) -> None: # This should only be called in the control thread if it exists. # Request is passed to shell channel thread to process. - other_socket = self.shell_channel_thread.manager.get_control_other_socket() - await other_socket.send_json({"type": "delete", "subshell_id": subshell_id}) - reply = await other_socket.recv_json() + other_socket = await self.shell_channel_thread.manager.get_control_other_socket( + self.control_thread + ) + await other_socket.asend_json({"type": "delete", "subshell_id": subshell_id}).wait() + reply = await other_socket.arecv_json().wait() self.session.send(socket, "delete_subshell_reply", reply, parent, ident) @@ -1174,9 +1187,11 @@ async def list_subshell_request(self, socket, ident, parent) -> None: # This should only be called in the control thread if it exists. # Request is passed to shell channel thread to process. 
- other_socket = self.shell_channel_thread.manager.get_control_other_socket() - await other_socket.send_json({"type": "list"}) - reply = await other_socket.recv_json() + other_socket = await self.shell_channel_thread.manager.get_control_other_socket( + self.control_thread + ) + await other_socket.asend_json({"type": "list"}).wait() + reply = await other_socket.arecv_json().wait() self.session.send(socket, "list_subshell_reply", reply, parent, ident) diff --git a/ipykernel/shellchannel.py b/ipykernel/shellchannel.py index 10abdb359..30ea6437b 100644 --- a/ipykernel/shellchannel.py +++ b/ipykernel/shellchannel.py @@ -1,6 +1,7 @@ """A thread for a shell channel.""" -import zmq.asyncio +import zmq +import zmq_anyio from .subshell_manager import SubshellManager from .thread import SHELL_CHANNEL_THREAD_NAME, BaseThread @@ -12,7 +13,12 @@ class ShellChannelThread(BaseThread): Communicates with shell/subshell threads via pairs of ZMQ inproc sockets. """ - def __init__(self, context: zmq.asyncio.Context, shell_socket: zmq.asyncio.Socket, **kwargs): + def __init__( + self, + context: zmq.Context, # type: ignore[type-arg] + shell_socket: zmq_anyio.Socket, + **kwargs, + ): """Initialize the thread.""" super().__init__(name=SHELL_CHANNEL_THREAD_NAME, **kwargs) self._manager: SubshellManager | None = None diff --git a/ipykernel/subshell.py b/ipykernel/subshell.py index 18e15ab38..180e9ecb3 100644 --- a/ipykernel/subshell.py +++ b/ipykernel/subshell.py @@ -2,7 +2,8 @@ from threading import current_thread -import zmq.asyncio +import zmq +import zmq_anyio from .thread import BaseThread @@ -15,17 +16,22 @@ def __init__(self, subshell_id: str, **kwargs): super().__init__(name=f"subshell-{subshell_id}", **kwargs) # Inproc PAIR socket, for communication with shell channel thread. 
- self._pair_socket: zmq.asyncio.Socket | None = None + self._pair_socket: zmq_anyio.Socket | None = None - async def create_pair_socket(self, context: zmq.asyncio.Context, address: str) -> None: + async def create_pair_socket( + self, + context: zmq.Context, # type: ignore[type-arg] + address: str, + ) -> None: """Create inproc PAIR socket, for communication with shell channel thread. - Should be called from this thread, so usually via add_task before the + Should be called from this thread, so usually via start_soon before the thread is started. """ assert current_thread() == self - self._pair_socket = context.socket(zmq.PAIR) + self._pair_socket = zmq_anyio.Socket(context, zmq.PAIR) self._pair_socket.connect(address) + self.start_soon(self._pair_socket.start) def run(self) -> None: try: diff --git a/ipykernel/subshell_manager.py b/ipykernel/subshell_manager.py index 14c4c57c3..f4f92d2da 100644 --- a/ipykernel/subshell_manager.py +++ b/ipykernel/subshell_manager.py @@ -11,17 +11,18 @@ from threading import Lock, current_thread, main_thread import zmq -import zmq.asyncio +import zmq_anyio from anyio import create_memory_object_stream, create_task_group +from anyio.abc import TaskGroup from .subshell import SubshellThread -from .thread import SHELL_CHANNEL_THREAD_NAME +from .thread import SHELL_CHANNEL_THREAD_NAME, BaseThread @dataclass class Subshell: thread: SubshellThread - shell_channel_socket: zmq.asyncio.Socket + shell_channel_socket: zmq_anyio.Socket class SubshellManager: @@ -39,10 +40,14 @@ class SubshellManager: against multiple subshells attempting to send at the same time. 
""" - def __init__(self, context: zmq.asyncio.Context, shell_socket: zmq.asyncio.Socket): + def __init__( + self, + context: zmq.Context, # type: ignore[type-arg] + shell_socket: zmq_anyio.Socket, + ): assert current_thread() == main_thread() - self._context: zmq.asyncio.Context = context + self._context: zmq.Context = context # type: ignore[type-arg] self._shell_socket = shell_socket self._cache: dict[str, Subshell] = {} self._lock_cache = Lock() @@ -51,15 +56,39 @@ def __init__(self, context: zmq.asyncio.Context, shell_socket: zmq.asyncio.Socke # Inproc pair sockets for control channel and main shell (parent subshell). # Each inproc pair has a "shell_channel" socket used in the shell channel # thread, and an "other" socket used in the other thread. - self._control_shell_channel_socket = self._create_inproc_pair_socket("control", True) - self._control_other_socket = self._create_inproc_pair_socket("control", False) - self._parent_shell_channel_socket = self._create_inproc_pair_socket(None, True) - self._parent_other_socket = self._create_inproc_pair_socket(None, False) + self.__control_shell_channel_socket: zmq_anyio.Socket | None = None + self.__control_other_socket: zmq_anyio.Socket | None = None + self.__parent_shell_channel_socket: zmq_anyio.Socket | None = None + self.__parent_other_socket: zmq_anyio.Socket | None = None # anyio memory object stream for async queue-like communication between tasks. # Used by _create_subshell to tell listen_from_subshells to spawn a new task. 
self._send_stream, self._receive_stream = create_memory_object_stream[str]() + @property + def _control_shell_channel_socket(self) -> zmq_anyio.Socket: + if self.__control_shell_channel_socket is None: + self.__control_shell_channel_socket = self._create_inproc_pair_socket("control", True) + return self.__control_shell_channel_socket + + @property + def _control_other_socket(self) -> zmq_anyio.Socket: + if self.__control_other_socket is None: + self.__control_other_socket = self._create_inproc_pair_socket("control", False) + return self.__control_other_socket + + @property + def _parent_shell_channel_socket(self) -> zmq_anyio.Socket: + if self.__parent_shell_channel_socket is None: + self.__parent_shell_channel_socket = self._create_inproc_pair_socket(None, True) + return self.__parent_shell_channel_socket + + @property + def _parent_other_socket(self) -> zmq_anyio.Socket: + if self.__parent_other_socket is None: + self.__parent_other_socket = self._create_inproc_pair_socket(None, False) + return self.__parent_other_socket + def close(self) -> None: """Stop all subshells and close all resources.""" assert current_thread().name == SHELL_CHANNEL_THREAD_NAME @@ -68,10 +97,10 @@ def close(self) -> None: self._receive_stream.close() for socket in ( - self._control_shell_channel_socket, - self._control_other_socket, - self._parent_shell_channel_socket, - self._parent_other_socket, + self.__control_shell_channel_socket, + self.__control_other_socket, + self.__parent_shell_channel_socket, + self.__parent_other_socket, ): if socket is not None: socket.close() @@ -84,10 +113,17 @@ def close(self) -> None: break self._stop_subshell(subshell) - def get_control_other_socket(self) -> zmq.asyncio.Socket: + async def get_control_other_socket(self, thread: BaseThread) -> zmq_anyio.Socket: + if not self._control_other_socket.started.is_set(): + await thread.task_group.start(self._control_other_socket.start) return self._control_other_socket - def get_other_socket(self, subshell_id: 
str | None) -> zmq.asyncio.Socket: + async def get_control_shell_channel_socket(self, thread: BaseThread) -> zmq_anyio.Socket: + if not self._control_shell_channel_socket.started.is_set(): + await thread.task_group.start(self._control_shell_channel_socket.start) + return self._control_shell_channel_socket + + def get_other_socket(self, subshell_id: str | None) -> zmq_anyio.Socket: """Return the other inproc pair socket for a subshell. This socket is accessed from the subshell thread. @@ -99,7 +135,7 @@ def get_other_socket(self, subshell_id: str | None) -> zmq.asyncio.Socket: assert socket is not None return socket - def get_shell_channel_socket(self, subshell_id: str | None) -> zmq.asyncio.Socket: + def get_shell_channel_socket(self, subshell_id: str | None) -> zmq_anyio.Socket: """Return the shell channel inproc pair socket for a subshell. This socket is accessed from the shell channel thread. @@ -117,17 +153,17 @@ def list_subshell(self) -> list[str]: with self._lock_cache: return list(self._cache) - async def listen_from_control(self, subshell_task: t.Any) -> None: + async def listen_from_control(self, subshell_task: t.Any, thread: BaseThread) -> None: """Listen for messages on the control inproc socket, handle those messages and return replies on the same socket. Runs in the shell channel thread. 
""" assert current_thread().name == SHELL_CHANNEL_THREAD_NAME - socket = self._control_shell_channel_socket + socket = await self.get_control_shell_channel_socket(thread) while True: - request = await socket.recv_json() + request = await socket.arecv_json().wait() reply = await self._process_control_request(request, subshell_task) - await socket.send_json(reply) + await socket.asend_json(reply).wait() async def listen_from_subshells(self) -> None: """Listen for reply messages on inproc sockets of all subshells and resend @@ -138,9 +174,9 @@ async def listen_from_subshells(self) -> None: assert current_thread().name == SHELL_CHANNEL_THREAD_NAME async with create_task_group() as tg: - tg.start_soon(self._listen_for_subshell_reply, None) + tg.start_soon(self._listen_for_subshell_reply, None, tg) async for subshell_id in self._receive_stream: - tg.start_soon(self._listen_for_subshell_reply, subshell_id) + tg.start_soon(self._listen_for_subshell_reply, subshell_id, tg) def subshell_id_from_thread_id(self, thread_id: int) -> str | None: """Return subshell_id of the specified thread_id. 
@@ -160,10 +196,10 @@ def subshell_id_from_thread_id(self, thread_id: int) -> str | None: def _create_inproc_pair_socket( self, name: str | None, shell_channel_end: bool - ) -> zmq.asyncio.Socket: + ) -> zmq_anyio.Socket: """Create and return a single ZMQ inproc pair socket.""" address = self._get_inproc_socket_address(name) - socket = self._context.socket(zmq.PAIR) + socket = zmq_anyio.Socket(self._context, zmq.PAIR) if shell_channel_end: socket.bind(address) else: @@ -209,7 +245,7 @@ def _get_inproc_socket_address(self, name: str | None) -> str: full_name = f"subshell-{name}" if name else "subshell" return f"inproc://{full_name}" - def _get_shell_channel_socket(self, subshell_id: str | None) -> zmq.asyncio.Socket: + def _get_shell_channel_socket(self, subshell_id: str | None) -> zmq_anyio.Socket: if subshell_id is None: return self._parent_shell_channel_socket with self._lock_cache: @@ -221,7 +257,9 @@ def _is_subshell(self, subshell_id: str | None) -> bool: with self._lock_cache: return subshell_id in self._cache - async def _listen_for_subshell_reply(self, subshell_id: str | None) -> None: + async def _listen_for_subshell_reply( + self, subshell_id: str | None, task_group: TaskGroup + ) -> None: """Listen for reply messages on specified subshell inproc socket and resend to the client via the shell_socket. 
@@ -231,11 +269,13 @@ async def _listen_for_subshell_reply(self, subshell_id: str | None) -> None: shell_channel_socket = self._get_shell_channel_socket(subshell_id) + if not shell_channel_socket.started.is_set(): + await task_group.start(shell_channel_socket.start) try: while True: - msg = await shell_channel_socket.recv_multipart(copy=False) + msg = await shell_channel_socket.arecv_multipart(copy=False).wait() with self._lock_shell_socket: - await self._shell_socket.send_multipart(msg) + await self._shell_socket.asend_multipart(msg).wait() except BaseException: if not self._is_subshell(subshell_id): # Subshell no longer exists so exit gracefully diff --git a/ipykernel/trio_runner.py b/ipykernel/trio_runner.py deleted file mode 100644 index 6fb44107b..000000000 --- a/ipykernel/trio_runner.py +++ /dev/null @@ -1,72 +0,0 @@ -"""A trio loop runner.""" - -import builtins -import logging -import signal -import threading -import traceback -import warnings - -import trio - - -class TrioRunner: - """A trio loop runner.""" - - def __init__(self): - """Initialize the runner.""" - self._cell_cancel_scope = None - self._trio_token = None - - def initialize(self, kernel, io_loop): - """Initialize the runner.""" - kernel.shell.set_trio_runner(self) - kernel.shell.run_line_magic("autoawait", "trio") - kernel.shell.magics_manager.magics["line"]["autoawait"] = lambda _: warnings.warn( - "Autoawait isn't allowed in Trio background loop mode.", stacklevel=2 - ) - self._interrupted = False - bg_thread = threading.Thread(target=io_loop.start, daemon=True, name="TornadoBackground") - bg_thread.start() - - def interrupt(self, signum, frame): - """Interrupt the runner.""" - if self._cell_cancel_scope: - self._cell_cancel_scope.cancel() - else: - msg = "Kernel interrupted but no cell is running" - raise Exception(msg) - - def run(self): - """Run the loop.""" - old_sig = signal.signal(signal.SIGINT, self.interrupt) - - def log_nursery_exc(exc): - exc = 
"\n".join(traceback.format_exception(type(exc), exc, exc.__traceback__)) - logging.error("An exception occurred in a global nursery task.\n%s", exc) - - async def trio_main(): - """Run the main loop.""" - self._trio_token = trio.lowlevel.current_trio_token() - async with trio.open_nursery() as nursery: - # TODO This hack prevents the nursery from cancelling all child - # tasks when an uncaught exception occurs, but it's ugly. - nursery._add_exc = log_nursery_exc - builtins.GLOBAL_NURSERY = nursery # type:ignore[attr-defined] - await trio.sleep_forever() - - trio.run(trio_main) - signal.signal(signal.SIGINT, old_sig) - - def __call__(self, async_fn): - """Handle a function call.""" - - async def loc(coro): - """A thread runner context.""" - self._cell_cancel_scope = trio.CancelScope() - with self._cell_cancel_scope: - return await coro - self._cell_cancel_scope = None # type:ignore[unreachable] - return None - - return trio.from_thread.run(loc, async_fn, trio_token=self._trio_token) diff --git a/pyproject.toml b/pyproject.toml index 908ac5370..1853544fa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,10 +29,11 @@ dependencies = [ "nest_asyncio>=1.4", "matplotlib-inline>=0.1", 'appnope>=0.1.2;platform_system=="Darwin"', - "pyzmq>=25.0", + "pyzmq>=26.0", "psutil>=5.7", "packaging>=22", - "anyio>=4.2.0", + "anyio>=4.8.0,<5.0.0", + "zmq-anyio >=0.3.6", ] [project.urls] @@ -230,9 +231,12 @@ filterwarnings= [ # ignore unclosed sqlite in traits "ignore:unclosed database in .trigger_timeout' was never awaited", + "ignore: Unclosed socket", + # ignore deprecated non async during tests: "always:For consistency across implementations, it is recommended that:PendingDeprecationWarning", - ] [tool.coverage.report] @@ -342,3 +346,6 @@ ignore = ["W002"] [tool.repo-review] ignore = ["PY007", "PP308", "GH102", "MY101"] + +[tool.hatch.metadata] +allow-direct-references = true diff --git a/tests/conftest.py b/tests/conftest.py index 76e780af3..32524e0ca 100644 --- 
a/tests/conftest.py +++ b/tests/conftest.py @@ -1,15 +1,15 @@ -import asyncio +import gc import logging -import os import warnings from math import inf +from threading import Event from typing import Any, Callable, no_type_check from unittest.mock import MagicMock import pytest import zmq -import zmq.asyncio -from anyio import create_memory_object_stream, create_task_group +import zmq_anyio +from anyio import create_memory_object_stream, create_task_group, sleep from anyio.streams.memory import MemoryObjectReceiveStream, MemoryObjectSendStream from jupyter_client.session import Session @@ -17,6 +17,12 @@ from ipykernel.kernelbase import Kernel from ipykernel.zmqshell import ZMQInteractiveShell + +@pytest.fixture(scope="session", autouse=True) +def _garbage_collection(request): + gc.collect() + + try: import resource except ImportError: @@ -28,12 +34,6 @@ except ModuleNotFoundError: tracemalloc = None - -@pytest.fixture() -def anyio_backend(): - return "asyncio" - - pytestmark = pytest.mark.anyio @@ -52,11 +52,6 @@ def anyio_backend(): resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard)) -# Enforce selector event loop on Windows. -if os.name == "nt": - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) # type:ignore - - class TestSession(Session): """A session that copies sent messages to an internal stream, so that they can be accessed later. 
@@ -83,21 +78,21 @@ def send(self, socket, *args, **kwargs): class KernelMixin: - shell_socket: zmq.asyncio.Socket - control_socket: zmq.asyncio.Socket + shell_socket: zmq_anyio.Socket + control_socket: zmq_anyio.Socket stop: Callable[[], None] log = logging.getLogger() def _initialize(self): self._is_test = True - self.context = context = zmq.asyncio.Context() - self.iopub_socket = context.socket(zmq.PUB) - self.stdin_socket = context.socket(zmq.ROUTER) + self.context = context = zmq.Context() + self.iopub_socket = zmq_anyio.Socket(context.socket(zmq.PUB)) + self.stdin_socket = zmq_anyio.Socket(context.socket(zmq.ROUTER)) self.test_sockets = [self.iopub_socket] for name in ["shell", "control"]: - socket = context.socket(zmq.ROUTER) + socket = zmq_anyio.Socket(context.socket(zmq.ROUTER)) self.test_sockets.append(socket) setattr(self, f"{name}_socket", socket) @@ -148,7 +143,7 @@ def _prep_msg(self, *args, **kwargs): async def _wait_for_msg(self): while not self._reply: - await asyncio.sleep(0.1) + await sleep(0.1) _, msg = self.session.feed_identities(self._reply) return self.session.deserialize(msg) @@ -172,6 +167,8 @@ class MockKernel(KernelMixin, Kernel): # type:ignore def __init__(self, *args, **kwargs): self._initialize() self.shell = MagicMock() + self.shell_stop = Event() + self.control_stop = Event() super().__init__(*args, **kwargs) async def do_execute( @@ -193,6 +190,8 @@ async def do_execute( class MockIPyKernel(KernelMixin, IPythonKernel): # type:ignore def __init__(self, *args, **kwargs): self._initialize() + self.shell_stop = Event() + self.control_stop = Event() super().__init__(*args, **kwargs) @@ -201,8 +200,10 @@ async def kernel(anyio_backend): async with create_task_group() as tg: kernel = MockKernel() tg.start_soon(kernel.start) - yield kernel - kernel.destroy() + try: + yield kernel + finally: + kernel.destroy() @pytest.fixture() @@ -210,9 +211,11 @@ async def ipkernel(anyio_backend): async with create_task_group() as tg: kernel = 
MockIPyKernel() tg.start_soon(kernel.start) - yield kernel - kernel.destroy() - ZMQInteractiveShell.clear_instance() + try: + yield kernel + finally: + kernel.destroy() + ZMQInteractiveShell.clear_instance() @pytest.fixture() diff --git a/tests/test_async.py b/tests/test_async.py index a40db4a00..c2dd980b9 100644 --- a/tests/test_async.py +++ b/tests/test_async.py @@ -30,24 +30,23 @@ def test_async_await(): assert content["status"] == "ok", content -# FIXME: @pytest.mark.parametrize("asynclib", ["asyncio", "trio", "curio"]) @pytest.mark.skipif(os.name == "nt", reason="Cannot interrupt on Windows") -@pytest.mark.parametrize("asynclib", ["asyncio"]) -def test_async_interrupt(asynclib, request): +@pytest.mark.parametrize("anyio_backend", ["asyncio"]) # FIXME: %autoawait trio +def test_async_interrupt(anyio_backend, request): assert KC is not None assert KM is not None try: - __import__(asynclib) + __import__(anyio_backend) except ImportError: - pytest.skip("Requires %s" % asynclib) - request.addfinalizer(lambda: execute("%autoawait asyncio", KC)) + pytest.skip("Requires %s" % anyio_backend) + request.addfinalizer(lambda: execute(f"%autoawait {anyio_backend}", KC)) flush_channels(KC) - msg_id, content = execute("%autoawait " + asynclib, KC) + msg_id, content = execute(f"%autoawait {anyio_backend}", KC) assert content["status"] == "ok", content flush_channels(KC) - msg_id = KC.execute(f"print('begin'); import {asynclib}; await {asynclib}.sleep(5)") + msg_id = KC.execute(f"print('begin'); import {anyio_backend}; await {anyio_backend}.sleep(5)") busy = KC.get_iopub_msg(timeout=TIMEOUT) validate_message(busy, "status", msg_id) assert busy["content"]["execution_state"] == "busy" diff --git a/tests/test_embed_kernel.py b/tests/test_embed_kernel.py index 0c74dd1f0..37d1aaf46 100644 --- a/tests/test_embed_kernel.py +++ b/tests/test_embed_kernel.py @@ -145,7 +145,10 @@ def test_embed_kernel_namespace(): with setup_kernel(cmd) as client: # oinfo a (int) client.inspect("a") - msg 
= client.get_shell_msg(timeout=TIMEOUT) + while True: + msg = client.get_shell_msg(timeout=TIMEOUT) + if msg["msg_type"] == "inspect_reply": + break content = msg["content"] assert content["found"] text = content["data"]["text/plain"] @@ -153,7 +156,10 @@ def test_embed_kernel_namespace(): # oinfo b (str) client.inspect("b") - msg = client.get_shell_msg(timeout=TIMEOUT) + while True: + msg = client.get_shell_msg(timeout=TIMEOUT) + if msg["msg_type"] == "inspect_reply": + break content = msg["content"] assert content["found"] text = content["data"]["text/plain"] @@ -161,7 +167,10 @@ def test_embed_kernel_namespace(): # oinfo c (undefined) client.inspect("c") - msg = client.get_shell_msg(timeout=TIMEOUT) + while True: + msg = client.get_shell_msg(timeout=TIMEOUT) + if msg["msg_type"] == "inspect_reply": + break content = msg["content"] assert not content["found"] @@ -186,7 +195,10 @@ def test_embed_kernel_reentrant(): with setup_kernel(cmd) as client: for i in range(5): client.inspect("count") - msg = client.get_shell_msg(timeout=TIMEOUT) + while True: + msg = client.get_shell_msg(timeout=TIMEOUT) + if msg["msg_type"] == "inspect_reply": + break content = msg["content"] assert content["found"] text = content["data"]["text/plain"] diff --git a/tests/test_eventloop.py b/tests/test_eventloop.py index 7dc0106c8..e4aef5711 100644 --- a/tests/test_eventloop.py +++ b/tests/test_eventloop.py @@ -79,6 +79,7 @@ def do_thing(): @windows_skip +@pytest.mark.parametrize("anyio_backend", ["asyncio"]) def test_asyncio_loop(kernel): def do_thing(): loop.call_later(0.01, loop.stop) diff --git a/tests/test_io.py b/tests/test_io.py index e3ff28159..17f955af6 100644 --- a/tests/test_io.py +++ b/tests/test_io.py @@ -12,31 +12,42 @@ import pytest import zmq -import zmq.asyncio +import zmq_anyio +from anyio import create_task_group from jupyter_client.session import Session from ipykernel.iostream import _PARENT, BackgroundSocket, IOPubThread, OutStream +pytestmark = pytest.mark.anyio + 
@pytest.fixture() def ctx(): - ctx = zmq.asyncio.Context() + ctx = zmq.Context() yield ctx ctx.destroy() @pytest.fixture() -def iopub_thread(ctx): - with ctx.socket(zmq.PUB) as pub: - thread = IOPubThread(pub) - thread.start() - - yield thread - thread.stop() - thread.close() - - -def test_io_api(iopub_thread): +async def iopub_thread(ctx): + try: + async with create_task_group() as tg: + pub = zmq_anyio.Socket(ctx.socket(zmq.PUB)) + await tg.start(pub.start) + thread = IOPubThread(pub) + thread.start() + + try: + yield thread + finally: + await pub.stop() + thread.stop() + thread.close() + except BaseException: + pass + + +async def test_io_api(iopub_thread): """Test that wrapped stdout has the same API as a normal TextIO object""" session = Session() stream = OutStream(session, iopub_thread, "stdout") @@ -59,13 +70,13 @@ def test_io_api(iopub_thread): stream.write(b"") # type:ignore -def test_io_isatty(iopub_thread): +async def test_io_isatty(iopub_thread): session = Session() stream = OutStream(session, iopub_thread, "stdout", isatty=True) assert stream.isatty() -async def test_io_thread(anyio_backend, iopub_thread): +async def test_io_thread(iopub_thread): thread = iopub_thread thread._setup_pipe_in() msg = [thread._pipe_uuid, b"a"] @@ -77,11 +88,9 @@ async def test_io_thread(anyio_backend, iopub_thread): thread._really_send([b"hi"]) ctx1.destroy() thread.stop() - thread.close() - thread._really_send(None) -async def test_background_socket(anyio_backend, iopub_thread): +async def test_background_socket(iopub_thread): sock = BackgroundSocket(iopub_thread) assert sock.__class__ == BackgroundSocket with warnings.catch_warnings(): @@ -92,7 +101,7 @@ async def test_background_socket(anyio_backend, iopub_thread): sock.send(b"hi") -async def test_outstream(anyio_backend, iopub_thread): +async def test_outstream(iopub_thread): session = Session() pub = iopub_thread.socket @@ -118,7 +127,7 @@ async def test_outstream(anyio_backend, iopub_thread): assert 
stream.writable() -@pytest.mark.anyio() +@pytest.mark.skip(reason="Cannot use a zmq-anyio socket on different threads") async def test_event_pipe_gc(iopub_thread): session = Session(key=b"abc") stream = OutStream( @@ -139,7 +148,7 @@ async def test_event_pipe_gc(iopub_thread): f: Future = Future() try: - await iopub_thread._event_pipe_gc() + iopub_thread._event_pipe_gc() except Exception as e: f.set_exception(e) else: @@ -150,12 +159,13 @@ async def test_event_pipe_gc(iopub_thread): # assert iopub_thread._event_pipes == {} -def subprocess_test_echo_watch(): +async def subprocess_test_echo_watch(): # handshake Pub subscription session = Session(key=b"abc") # use PUSH socket to avoid subscription issues - with zmq.asyncio.Context() as ctx, ctx.socket(zmq.PUSH) as pub: + with zmq.Context() as ctx: + pub = zmq_anyio.Socket(ctx.socket(zmq.PUSH)) pub.connect(os.environ["IOPUB_URL"]) iopub_thread = IOPubThread(pub) iopub_thread.start() @@ -192,19 +202,18 @@ def subprocess_test_echo_watch(): iopub_thread.close() -@pytest.mark.anyio() @pytest.mark.skipif(sys.platform.startswith("win"), reason="Windows") async def test_echo_watch(ctx): """Test echo on underlying FD while capturing the same FD Test runs in a subprocess to avoid messing with pytest output capturing. 
""" - s = ctx.socket(zmq.PULL) + s = zmq_anyio.Socket(ctx.socket(zmq.PULL)) port = s.bind_to_random_port("tcp://127.0.0.1") url = f"tcp://127.0.0.1:{port}" session = Session(key=b"abc") stdout_chunks = [] - with s: + async with s: env = dict(os.environ) env["IOPUB_URL"] = url env["PYTHONUNBUFFERED"] = "1" @@ -213,7 +222,7 @@ async def test_echo_watch(ctx): [ sys.executable, "-c", - f"import {__name__}; {__name__}.subprocess_test_echo_watch()", + f"import {__name__}, anyio; anyio.run({__name__}.subprocess_test_echo_watch)", ], env=env, capture_output=True, @@ -224,8 +233,8 @@ async def test_echo_watch(ctx): print(f"{p.stdout=}") print(f"{p.stderr}=", file=sys.stderr) assert p.returncode == 0 - while await s.poll(timeout=100): - msg = await s.recv_multipart() + while await s.apoll(timeout=100).wait(): + msg = await s.arecv_multipart().wait() ident, msg = session.feed_identities(msg, copy=True) msg = session.deserialize(msg, content=True, copy=True) assert msg is not None # for type narrowing diff --git a/tests/test_kernelapp.py b/tests/test_kernelapp.py index 0f1d04373..cc010740d 100644 --- a/tests/test_kernelapp.py +++ b/tests/test_kernelapp.py @@ -130,7 +130,7 @@ async def trigger_stop(): app.kernel = MockKernel() app.init_sockets() async with trio.open_nursery() as nursery: - nursery.start_soon(app._start) + nursery.start_soon(lambda: app._start("trio")) nursery.start_soon(trigger_stop) app.cleanup_connection_file() app.kernel.destroy() diff --git a/tests/test_start_kernel.py b/tests/test_start_kernel.py index 71f4bdc0a..b8eaf22d9 100644 --- a/tests/test_start_kernel.py +++ b/tests/test_start_kernel.py @@ -32,7 +32,10 @@ def test_ipython_start_kernel_userns(): with setup_kernel(cmd) as client: client.inspect("custom") - msg = client.get_shell_msg(timeout=TIMEOUT) + while True: + msg = client.get_shell_msg(timeout=TIMEOUT) + if msg["msg_type"] == "inspect_reply": + break content = msg["content"] assert content["found"] text = content["data"]["text/plain"] @@ -44,7 
+47,10 @@ def test_ipython_start_kernel_userns(): content = msg["content"] assert content["status"] == "ok" client.inspect("usermod") - msg = client.get_shell_msg(timeout=TIMEOUT) + while True: + msg = client.get_shell_msg(timeout=TIMEOUT) + if msg["msg_type"] == "inspect_reply": + break content = msg["content"] assert content["found"] text = content["data"]["text/plain"] @@ -68,7 +74,10 @@ def test_ipython_start_kernel_no_userns(): content = msg["content"] assert content["status"] == "ok" client.inspect("usermod") - msg = client.get_shell_msg(timeout=TIMEOUT) + while True: + msg = client.get_shell_msg(timeout=TIMEOUT) + if msg["msg_type"] == "inspect_reply": + break content = msg["content"] assert content["found"] text = content["data"]["text/plain"] diff --git a/tests/test_zmq_shell.py b/tests/test_zmq_shell.py index 8a8fe042b..33d23a59e 100644 --- a/tests/test_zmq_shell.py +++ b/tests/test_zmq_shell.py @@ -211,46 +211,53 @@ def test_unregister_hook(self): def test_magics(tmp_path): - context = zmq.Context() - socket = context.socket(zmq.PUB) - shell = InteractiveShell() - shell.user_ns["hi"] = 1 - magics = KernelMagics(shell) - - tmp_file = tmp_path / "test.txt" - tmp_file.write_text("hi", "utf8") - magics.edit(str(tmp_file)) - payload = shell.payload_manager.read_payload()[0] - assert payload["filename"] == str(tmp_file) - - magics.clear([]) - magics.less(str(tmp_file)) - if os.name == "posix": - magics.man("ls") - magics.autosave("10") - - socket.close() - context.destroy() + try: + context = zmq.Context() + socket = context.socket(zmq.PUB) + shell = InteractiveShell() + shell.user_ns["hi"] = 1 + magics = KernelMagics(shell) + + tmp_file = tmp_path / "test.txt" + tmp_file.write_text("hi", "utf8") + magics.edit(str(tmp_file)) + payload = shell.payload_manager.read_payload()[0] + assert payload["filename"] == str(tmp_file) + + magics.clear([]) + magics.less(str(tmp_file)) + if os.name == "posix": + magics.man("ls") + magics.autosave("10") + finally: + 
socket.close() + context.destroy() + shell.configurables = [] + InteractiveShell.clear_instance() def test_zmq_interactive_shell(kernel): - shell = ZMQInteractiveShell() - - with pytest.raises(RuntimeError): - shell.enable_gui("tk") - - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - shell.data_pub_class = MagicMock() # type:ignore - shell.data_pub - shell.kernel = kernel - shell.set_next_input("hi") - assert shell.get_parent() is None - if os.name == "posix": - shell.system_piped("ls") - else: - shell.system_piped("dir") - shell.ask_exit() + try: + shell = ZMQInteractiveShell() + + with pytest.raises(RuntimeError): + shell.enable_gui("tk") + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + shell.data_pub_class = MagicMock() # type:ignore + shell.data_pub + shell.kernel = kernel + shell.set_next_input("hi") + assert shell.get_parent() is None + if os.name == "posix": + shell.system_piped("ls") + else: + shell.system_piped("dir") + shell.ask_exit() + finally: + shell.configurables = [] + ZMQInteractiveShell.clear_instance() if __name__ == "__main__": From de4851354e14eff27f60aa7116d34aec3e5d725d Mon Sep 17 00:00:00 2001 From: davidbrochart Date: Fri, 7 Mar 2025 08:35:58 +0000 Subject: [PATCH 90/97] Publish 7.0.0a1 SHA256 hashes: ipykernel-7.0.0a1-py3-none-any.whl: fa4a1401d3f1a86dc2c8e47592ae60d59818d3b597f68198bb99fd41d789d09a ipykernel-7.0.0a1.tar.gz: 7986c167f4501d4fab4873ab878196fd7dcc189a5cdea4bed9f973bbe3859098 --- CHANGELOG.md | 94 ++++++++++++++++++++++++++++++++++++++++++- ipykernel/_version.py | 2 +- 2 files changed, 93 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d314cf253..6fbfbcd46 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,98 @@ +## 7.0.0a1 + +([Full Changelog](https://github.com/ipython/ipykernel/compare/v7.0.0a0...cac11a08449d41ceb8a56e3095e78e00cb1626eb)) + +### Enhancements made + +- Replace BaseThread's add_task with 
start_soon [#1300](https://github.com/ipython/ipykernel/pull/1300) ([@davidbrochart](https://github.com/davidbrochart)) +- Use supported_features=['debugger'] in kernel info reply [#1296](https://github.com/ipython/ipykernel/pull/1296) ([@ianthomas23](https://github.com/ianthomas23)) +- Use zmq-anyio [#1291](https://github.com/ipython/ipykernel/pull/1291) ([@davidbrochart](https://github.com/davidbrochart)) + +### Bugs fixed + +- Fix OutStream using \_fid before being defined [#1373](https://github.com/ipython/ipykernel/pull/1373) ([@davidbrochart](https://github.com/davidbrochart)) +- Make kernelbase.\_eventloop_set event thread-safe [#1366](https://github.com/ipython/ipykernel/pull/1366) ([@davidbrochart](https://github.com/davidbrochart)) +- Remove implicit bind_kernel in `%qtconsole` [#1315](https://github.com/ipython/ipykernel/pull/1315) ([@minrk](https://github.com/minrk)) +- Fix ipykernel install [#1310](https://github.com/ipython/ipykernel/pull/1310) ([@davidbrochart](https://github.com/davidbrochart)) +- socket must be None, not shell_socket for default shell [#1281](https://github.com/ipython/ipykernel/pull/1281) ([@minrk](https://github.com/minrk)) +- restore zero-copy recv on shell messages [#1280](https://github.com/ipython/ipykernel/pull/1280) ([@minrk](https://github.com/minrk)) + +### Maintenance and upkeep improvements + +- chore: update pre-commit hooks [#1372](https://github.com/ipython/ipykernel/pull/1372) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- Remove nose import. [#1368](https://github.com/ipython/ipykernel/pull/1368) ([@Carreau](https://github.com/Carreau)) +- chore: update pre-commit hooks [#1355](https://github.com/ipython/ipykernel/pull/1355) ([@pre-commit-ci](https://github.com/pre-commit-ci)) +- Fix expected text depending on IPython version. [#1354](https://github.com/ipython/ipykernel/pull/1354) ([@Carreau](https://github.com/Carreau)) +- Another try at tracking down ResourceWarning with tracemalloc. 
[#1353](https://github.com/ipython/ipykernel/pull/1353) ([@Carreau](https://github.com/Carreau)) +- Remove deprecated modules since 4.3 (2016). [#1352](https://github.com/ipython/ipykernel/pull/1352) ([@Carreau](https://github.com/Carreau)) +- Try to reenable tests from downstream ipywidgets [#1350](https://github.com/ipython/ipykernel/pull/1350) ([@Carreau](https://github.com/Carreau)) +- Disable 3 failing downstream tests, but keep testing the rest. [#1349](https://github.com/ipython/ipykernel/pull/1349) ([@Carreau](https://github.com/Carreau)) +- Licence :: * trove classifers are deprecated [#1348](https://github.com/ipython/ipykernel/pull/1348) ([@Carreau](https://github.com/Carreau)) +- Pin sphinx to resolve docs build failures [#1347](https://github.com/ipython/ipykernel/pull/1347) ([@krassowski](https://github.com/krassowski)) +- Make our own mock kernel methods async [#1346](https://github.com/ipython/ipykernel/pull/1346) ([@Carreau](https://github.com/Carreau)) +- Try to debug non-closed iopub socket [#1345](https://github.com/ipython/ipykernel/pull/1345) ([@Carreau](https://github.com/Carreau)) +- Email is @python.org since 2018 [#1343](https://github.com/ipython/ipykernel/pull/1343) ([@Carreau](https://github.com/Carreau)) +- Remove unused ignores lints. [#1342](https://github.com/ipython/ipykernel/pull/1342) ([@Carreau](https://github.com/Carreau)) +- Enable ruff G002 and fix 6 occurences [#1341](https://github.com/ipython/ipykernel/pull/1341) ([@Carreau](https://github.com/Carreau)) +- Check ignores warnings are still relevant. 
[#1340](https://github.com/ipython/ipykernel/pull/1340) ([@Carreau](https://github.com/Carreau)) +- Move mypy disablinging error codes on a per-file basis [#1338](https://github.com/ipython/ipykernel/pull/1338) ([@Carreau](https://github.com/Carreau)) +- try to fix spyder kernel install [#1337](https://github.com/ipython/ipykernel/pull/1337) ([@Carreau](https://github.com/Carreau)) +- Remove test_check job [#1335](https://github.com/ipython/ipykernel/pull/1335) ([@Carreau](https://github.com/Carreau)) +- Refine deprecation error messages. [#1334](https://github.com/ipython/ipykernel/pull/1334) ([@Carreau](https://github.com/Carreau)) +- Bump mypy [#1333](https://github.com/ipython/ipykernel/pull/1333) ([@Carreau](https://github.com/Carreau)) +- Remove dead code. [#1332](https://github.com/ipython/ipykernel/pull/1332) ([@Carreau](https://github.com/Carreau)) +- Ignore or fix most of the remaining ruff 0.9.6 errors [#1331](https://github.com/ipython/ipykernel/pull/1331) ([@Carreau](https://github.com/Carreau)) +- minor code reformating valid ruff 0.9.6 [#1330](https://github.com/ipython/ipykernel/pull/1330) ([@Carreau](https://github.com/Carreau)) +- Some formatting changes to prepare bumping ruff pre-commit. [#1329](https://github.com/ipython/ipykernel/pull/1329) ([@Carreau](https://github.com/Carreau)) +- Manually update Codespell and fix new errors. [#1328](https://github.com/ipython/ipykernel/pull/1328) ([@Carreau](https://github.com/Carreau)) +- Manually update mdformat pre-commit and run it. [#1327](https://github.com/ipython/ipykernel/pull/1327) ([@Carreau](https://github.com/Carreau)) +- Manually update pre-commit hooks that do not trigger new errors/fixes. [#1326](https://github.com/ipython/ipykernel/pull/1326) ([@Carreau](https://github.com/Carreau)) +- Try to force precommit-ci to send autoupdate PRs. [#1325](https://github.com/ipython/ipykernel/pull/1325) ([@Carreau](https://github.com/Carreau)) +- Don't rerun test with --lf it hides failures. 
[#1324](https://github.com/ipython/ipykernel/pull/1324) ([@Carreau](https://github.com/Carreau)) +- Delete always skipped test, fix trio test, fix framelocal has not .clear() [#1322](https://github.com/ipython/ipykernel/pull/1322) ([@Carreau](https://github.com/Carreau)) +- Fix types lints [#1321](https://github.com/ipython/ipykernel/pull/1321) ([@Carreau](https://github.com/Carreau)) +- Remove link to numfocus for funding. [#1320](https://github.com/ipython/ipykernel/pull/1320) ([@Carreau](https://github.com/Carreau)) +- Remove downstream_check [#1318](https://github.com/ipython/ipykernel/pull/1318) ([@Carreau](https://github.com/Carreau)) +- Copy payloadpage.page from IPython [#1317](https://github.com/ipython/ipykernel/pull/1317) ([@Carreau](https://github.com/Carreau)) +- More Informative assert [#1314](https://github.com/ipython/ipykernel/pull/1314) ([@Carreau](https://github.com/Carreau)) +- Fix test_print_to_correct_cell_from_child_thread [#1312](https://github.com/ipython/ipykernel/pull/1312) ([@davidbrochart](https://github.com/davidbrochart)) +- make debugger class configurable [#1307](https://github.com/ipython/ipykernel/pull/1307) ([@smacke](https://github.com/smacke)) +- properly close OutStream and various fixes [#1305](https://github.com/ipython/ipykernel/pull/1305) ([@limwz01](https://github.com/limwz01)) +- Remove base setup [#1299](https://github.com/ipython/ipykernel/pull/1299) ([@davidbrochart](https://github.com/davidbrochart)) +- Suggest to make implementations of some function always return awaitable [#1295](https://github.com/ipython/ipykernel/pull/1295) ([@Carreau](https://github.com/Carreau)) +- Misc type annotations [#1294](https://github.com/ipython/ipykernel/pull/1294) ([@Carreau](https://github.com/Carreau)) +- Rely on intrsphinx_registry to keep intersphinx up to date. 
[#1290](https://github.com/ipython/ipykernel/pull/1290) ([@Carreau](https://github.com/Carreau)) +- Improve robustness of subshell concurrency tests using Barrier [#1288](https://github.com/ipython/ipykernel/pull/1288) ([@ianthomas23](https://github.com/ianthomas23)) +- Add 20 min timeout dowstream ipyparallel [#1287](https://github.com/ipython/ipykernel/pull/1287) ([@Carreau](https://github.com/Carreau)) +- Improve robustness of subshell concurrency tests [#1285](https://github.com/ipython/ipykernel/pull/1285) ([@ianthomas23](https://github.com/ianthomas23)) +- Drop support for Python 3.8 [#1284](https://github.com/ipython/ipykernel/pull/1284) ([@ianthomas23](https://github.com/ianthomas23)) +- remove deprecated ipyparallel methods now that they are broken anyway [#1282](https://github.com/ipython/ipykernel/pull/1282) ([@minrk](https://github.com/minrk)) +- start testing on 3.13 [#1277](https://github.com/ipython/ipykernel/pull/1277) ([@Carreau](https://github.com/Carreau)) +- Try to add workflow to publish nightlies [#1276](https://github.com/ipython/ipykernel/pull/1276) ([@Carreau](https://github.com/Carreau)) +- fix mixture of sync/async sockets in IOPubThread [#1275](https://github.com/ipython/ipykernel/pull/1275) ([@minrk](https://github.com/minrk)) +- Remove some potential dead-code. [#1273](https://github.com/ipython/ipykernel/pull/1273) ([@Carreau](https://github.com/Carreau)) + +### Deprecated features + +- Remove deprecated modules since 4.3 (2016). 
[#1352](https://github.com/ipython/ipykernel/pull/1352) ([@Carreau](https://github.com/Carreau)) +- Suggest to make implementations of some function always return awaitable [#1295](https://github.com/ipython/ipykernel/pull/1295) ([@Carreau](https://github.com/Carreau)) + +### Other merged PRs + +- Ensure test_start_app takes 1s to stop kernel [#1364](https://github.com/ipython/ipykernel/pull/1364) ([@davidbrochart](https://github.com/davidbrochart)) +- Test more python versions [#1358](https://github.com/ipython/ipykernel/pull/1358) ([@davidbrochart](https://github.com/davidbrochart)) + +### Contributors to this release + +([GitHub contributors page for this release](https://github.com/ipython/ipykernel/graphs/contributors?from=2024-10-22&to=2025-03-07&type=c)) + +[@Carreau](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ACarreau+updated%3A2024-10-22..2025-03-07&type=Issues) | [@ccordoba12](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Accordoba12+updated%3A2024-10-22..2025-03-07&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Adavidbrochart+updated%3A2024-10-22..2025-03-07&type=Issues) | [@ianthomas23](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aianthomas23+updated%3A2024-10-22..2025-03-07&type=Issues) | [@krassowski](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Akrassowski+updated%3A2024-10-22..2025-03-07&type=Issues) | [@limwz01](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Alimwz01+updated%3A2024-10-22..2025-03-07&type=Issues) | [@minrk](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aminrk+updated%3A2024-10-22..2025-03-07&type=Issues) | [@pre-commit-ci](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Apre-commit-ci+updated%3A2024-10-22..2025-03-07&type=Issues) | 
[@smacke](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Asmacke+updated%3A2024-10-22..2025-03-07&type=Issues) + + + ## 7.0.0a0 ([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.29.3...314cc49da6e7d69d74f4741d4ea6568e926d1819)) @@ -47,8 +139,6 @@ [@agronholm](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aagronholm+updated%3A2024-02-26..2024-10-22&type=Issues) | [@blink1073](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ablink1073+updated%3A2024-02-26..2024-10-22&type=Issues) | [@bluss](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Abluss+updated%3A2024-02-26..2024-10-22&type=Issues) | [@Carreau](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3ACarreau+updated%3A2024-02-26..2024-10-22&type=Issues) | [@davidbrochart](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Adavidbrochart+updated%3A2024-02-26..2024-10-22&type=Issues) | [@gregory-shklover](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Agregory-shklover+updated%3A2024-02-26..2024-10-22&type=Issues) | [@hroncok](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Ahroncok+updated%3A2024-02-26..2024-10-22&type=Issues) | [@ianthomas23](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aianthomas23+updated%3A2024-02-26..2024-10-22&type=Issues) | [@ivanov](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aivanov+updated%3A2024-02-26..2024-10-22&type=Issues) | [@krassowski](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Akrassowski+updated%3A2024-02-26..2024-10-22&type=Issues) | [@maartenbreddels](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Amaartenbreddels+updated%3A2024-02-26..2024-10-22&type=Issues) | [@minrk](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Aminrk+updated%3A2024-02-26..2024-10-22&type=Issues) | 
[@mlucool](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3Amlucool+updated%3A2024-02-26..2024-10-22&type=Issues) | [@s-t-e-v-e-n-k](https://github.com/search?q=repo%3Aipython%2Fipykernel+involves%3As-t-e-v-e-n-k+updated%3A2024-02-26..2024-10-22&type=Issues) - - ## 6.29.5 ([Full Changelog](https://github.com/ipython/ipykernel/compare/v6.29.4...1e62d48298e353a9879fae99bc752f9bb48797ef)) diff --git a/ipykernel/_version.py b/ipykernel/_version.py index 166330638..e323d3f23 100644 --- a/ipykernel/_version.py +++ b/ipykernel/_version.py @@ -7,7 +7,7 @@ import re # Version string must appear intact for hatch versioning -__version__ = "7.0.0a0" +__version__ = "7.0.0a1" # Build up version_info tuple for backwards compatibility pattern = r"(?P\d+).(?P\d+).(?P\d+)(?P.*)" From 0d7f7219a176baf183bf552d340eb53aace5dfd1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 09:01:38 +0100 Subject: [PATCH 91/97] chore: update pre-commit hooks (#1375) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2547aab2e..8c01e520c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: trailing-whitespace - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.31.2 + rev: 0.31.3 hooks: - id: check-github-workflows @@ -74,7 +74,7 @@ repos: - id: rst-inline-touching-normal - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.9 + rev: v0.9.10 hooks: - id: ruff types_or: [python, jupyter] From 83b51a60fcba003556a23322cb8c154fa27a8de2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 09:06:42 +0100 Subject: [PATCH 92/97] chore: update pre-commit hooks (#1378) 
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- ipykernel/kernelbase.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8c01e520c..8c9e3c640 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -74,7 +74,7 @@ repos: - id: rst-inline-touching-normal - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.10 + rev: v0.11.0 hooks: - id: ruff types_or: [python, jupyter] diff --git a/ipykernel/kernelbase.py b/ipykernel/kernelbase.py index 6465751c2..e260f2d27 100644 --- a/ipykernel/kernelbase.py +++ b/ipykernel/kernelbase.py @@ -70,7 +70,7 @@ def _accepts_parameters(meth, param_names): parameters = inspect.signature(meth).parameters - accepts = {param: False for param in param_names} + accepts = dict.fromkeys(param_names, False) for param in param_names: param_spec = parameters.get(param) From 9008dbb508091c2fc7f18f2a54268f112dc6f17a Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Fri, 21 Mar 2025 15:27:32 +0100 Subject: [PATCH 93/97] TQDM workaround due to unresponsive maintainer (#1363) Co-authored-by: Min RK --- ipykernel/zmqshell.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/ipykernel/zmqshell.py b/ipykernel/zmqshell.py index ef682f940..28475fb8d 100644 --- a/ipykernel/zmqshell.py +++ b/ipykernel/zmqshell.py @@ -472,6 +472,18 @@ def subshell(self, arg_s): class ZMQInteractiveShell(InteractiveShell): """A subclass of InteractiveShell for ZMQ.""" + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # tqdm has an incorrect detection of ZMQInteractiveShell when launch via + # a scheduler that bypass IPKernelApp Think of JupyterHub cluster + # spawners and co. as of end of Feb 2025, the maintainer has been + # unresponsive for 5 months, to our fix, so we implement a workaround. I + # don't like it but we have few other choices. 
+ # See https://github.com/tqdm/tqdm/pull/1628 + if "IPKernelApp" not in self.config: + self.config.IPKernelApp.tqdm = "dummy value for https://github.com/tqdm/tqdm/pull/1628" + displayhook_class = Type(ZMQShellDisplayHook) display_pub_class = Type(ZMQDisplayPublisher) data_pub_class = Any() # type:ignore[assignment] From ff34a51075f7a6baa69124bdeae1974ba9ec4108 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 27 Mar 2025 08:54:08 +0100 Subject: [PATCH 94/97] chore: update pre-commit hooks (#1383) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8c9e3c640..17b8582e6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -74,7 +74,7 @@ repos: - id: rst-inline-touching-normal - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.0 + rev: v0.11.2 hooks: - id: ruff types_or: [python, jupyter] From 2c212648775646f1c92e8f3e977806dea14186da Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 1 Apr 2025 09:00:11 +0200 Subject: [PATCH 95/97] chore: update pre-commit hooks (#1385) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 17b8582e6..fea4685d0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: trailing-whitespace - repo: https://github.com/python-jsonschema/check-jsonschema - rev: 0.31.3 + rev: 0.32.1 hooks: - id: check-github-workflows From 5802cea49b204f42e326da793426083cbb1469aa Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> 
Date: Tue, 8 Apr 2025 14:20:47 +0200 Subject: [PATCH 96/97] chore: update pre-commit hooks (#1388) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fea4685d0..363a2fe8f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -74,7 +74,7 @@ repos: - id: rst-inline-touching-normal - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.11.2 + rev: v0.11.4 hooks: - id: ruff types_or: [python, jupyter] From 8322a7684b004ee95f07b2f86f61e28146a5996d Mon Sep 17 00:00:00 2001 From: M Bussonnier Date: Thu, 24 Apr 2025 12:57:26 +0200 Subject: [PATCH 97/97] Add security.md (#1394) --- SECURITY.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 000000000..36f74b3a5 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,10 @@ +# Security Policy + +## Reporting a Vulnerability + +All IPython and Jupyter security are handled via security@ipython.org. +You can find more information on the Jupyter website. https://jupyter.org/security + +## Tidelift + +You can report security concerns for ipykernel via the [Tidelift platform](https://tidelift.com/security).