Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions docs/source/user_guide/guide.rst
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ With jupyterlab support.

pip install -U "fastplotlib[notebook,imgui]"

.. note:: ``imgui-bundle`` is required for the ``NDWidget``

Without imgui
^^^^^^^^^^^^^

Expand Down
8 changes: 7 additions & 1 deletion fastplotlib/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,13 @@

# this must be the first import for auto-canvas detection
from .utils import loop # noqa
from .utils import (
config,
enumerate_adapters,
select_adapter,
print_wgpu_report,
protocols,
)
from .graphics import *
from .graphics.features import GraphicFeatureEvent
from .graphics.selectors import *
Expand All @@ -20,7 +27,6 @@
from .layouts import Figure

from .widgets import NDWidget, ImageWidget
from .utils import config, enumerate_adapters, select_adapter, print_wgpu_report


if len(enumerate_adapters()) < 1:
Expand Down
2 changes: 1 addition & 1 deletion fastplotlib/graphics/features/_image.py
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ def _fix_data(self, data):
)

if data.itemsize == 8:
warn(f"casting {array.dtype} array to float32")
warn(f"casting {data.dtype} array to float32")
return data.astype(np.float32)

return data
Expand Down
2 changes: 1 addition & 1 deletion fastplotlib/utils/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from .gpu import enumerate_adapters, select_adapter, print_wgpu_report
from ._plot_helpers import *
from .enums import *
from ._protocols import ArrayProtocol, ARRAY_LIKE_ATTRS
from .protocols import ARRAY_LIKE_ATTRS, ArrayProtocol, FutureProtocol, CudaArrayProtocol


@dataclass
Expand Down
33 changes: 0 additions & 33 deletions fastplotlib/utils/_protocols.py

This file was deleted.

26 changes: 23 additions & 3 deletions fastplotlib/utils/functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@

from pygfx import Texture, Color

from .protocols import CudaArrayProtocol


cmap_catalog = cmap_lib.Catalog()

Expand Down Expand Up @@ -405,9 +407,22 @@ def parse_cmap_values(
return colors


def cuda_to_numpy(arr: "CudaArrayProtocol") -> np.ndarray:
    """
    Copy a GPU (CUDA) array to host memory as a numpy array.

    Parameters
    ----------
    arr: CudaArrayProtocol
        object exposing ``__cuda_array_interface__``, e.g. a cupy array

    Returns
    -------
    np.ndarray
        host copy of the data

    Raises
    ------
    ImportError
        if ``cupy`` is not installed
    """
    # cupy is an optional dependency, import lazily so CPU-only installs work
    try:
        import cupy
    except ImportError:
        raise ImportError(
            "`cupy` is required to work with GPU arrays\npip install cupy"
        )

    return cupy.asnumpy(arr)


def subsample_array(
arr: np.ndarray, max_size: int = 1e6, ignore_dims: Sequence[int] | None = None
):
arr: CudaArrayProtocol,
max_size: int = 1e6,
ignore_dims: Sequence[int] | None = None,
) -> np.ndarray:
"""
Subsamples an input array while preserving its relative dimensional proportions.

Expand Down Expand Up @@ -476,7 +491,12 @@ def subsample_array(

slices = tuple(slices)

return np.asarray(arr[slices])
arr_sliced = arr[slices]

if isinstance(arr_sliced, CudaArrayProtocol):
return cuda_to_numpy(arr_sliced)

return arr_sliced


def heatmap_to_positions(heatmap: np.ndarray, xvals: np.ndarray) -> np.ndarray:
Expand Down
62 changes: 62 additions & 0 deletions fastplotlib/utils/protocols.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Protocol, runtime_checkable


# attribute names used to duck-type "array-like" objects elsewhere in fastplotlib;
# roughly the members declared on ArrayProtocol below
# NOTE(review): ``__array_ufunc__`` appears here but is not declared on
# ArrayProtocol — confirm the asymmetry is intended
ARRAY_LIKE_ATTRS = [
    "__array__",
    "__array_ufunc__",
    "dtype",
    "shape",
    "ndim",
    "__getitem__",
]


@runtime_checkable
class ArrayProtocol(Protocol):
    """
    An object that is sufficiently array-like.

    Matches numpy-style duck arrays: anything exposing ``__array__``,
    ``dtype``, ``ndim``, ``shape`` and ``__getitem__``.

    NOTE: because this is ``runtime_checkable``, ``isinstance`` checks only
    verify that the attributes exist — signatures and types are not checked.
    """

    def __array__(self) -> ArrayProtocol: ...

    @property
    def dtype(self) -> Any: ...

    @property
    def ndim(self) -> int: ...

    @property
    def shape(self) -> tuple[int, ...]: ...

    def __getitem__(self, key) -> ArrayProtocol: ...


@runtime_checkable
class CudaArrayProtocol(Protocol):
    """
    An object that can be converted to a cupy array, i.e. a GPU array
    exposing the CUDA Array Interface.

    NOTE: because this is ``runtime_checkable``, ``isinstance`` checks only
    verify that the attribute exists.
    """

    # per the CUDA Array Interface spec this is an attribute holding a
    # metadata dict — declaring it as a method would mismatch real GPU
    # arrays (e.g. cupy) under static type checking
    @property
    def __cuda_array_interface__(self) -> dict: ...


@runtime_checkable
class FutureProtocol(Protocol):
    """
    An object that is sufficiently Future-like.

    Mirrors the ``concurrent.futures.Future`` API; ``set_result`` is
    annotated with :class:`ArrayProtocol` because these futures resolve
    to array data.
    """

    def cancel(self): ...

    def cancelled(self): ...

    def running(self): ...

    def done(self): ...

    def add_done_callback(self, fn: Callable): ...

    def result(self, timeout: float | None): ...

    def exception(self, timeout: float | None): ...

    def set_result(self, array: ArrayProtocol): ...

    def set_exception(self, exception): ...
11 changes: 2 additions & 9 deletions fastplotlib/widgets/nd_widget/__init__.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,7 @@
from ...layouts import IMGUI

try:
import imgui_bundle
except ImportError:
HAS_XARRAY = False
else:
HAS_XARRAY = True


if IMGUI and HAS_XARRAY:
if IMGUI:
from ._base import NDProcessor, NDGraphic
from ._nd_positions import NDPositions, NDPositionsProcessor, ndp_extras
from ._nd_image import NDImageProcessor, NDImage
Expand All @@ -19,6 +12,6 @@
class NDWidget:
    """Fallback stand-in used when ``imgui-bundle`` is not installed."""

    def __init__(self, *args, **kwargs):
        # fail loudly at construction time with install instructions
        raise ModuleNotFoundError(
            "NDWidget requires `imgui-bundle` to be installed.\n"
            "pip install imgui-bundle"
        )
100 changes: 100 additions & 0 deletions fastplotlib/widgets/nd_widget/_async.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
from collections.abc import Generator
from concurrent.futures import Future

from ...utils import ArrayProtocol, FutureProtocol, CudaArrayProtocol, cuda_to_numpy


class FutureArray(Future):
    """
    A ``concurrent.futures.Future`` that quacks like an array.

    Exposes ``shape``, ``dtype``, ``ndim``, ``__getitem__`` and ``__array__``
    so async producers can hand consumers a placeholder that is indexed like
    a real array; indexing blocks until the result is set or ``timeout``
    seconds elapse.
    """

    def __init__(self, shape, dtype, timeout: float = 1.0):
        # metadata is known up-front even though the data arrives later
        self._shape = shape
        self._dtype = dtype
        self._timeout = timeout

        super().__init__()

    @property
    def shape(self) -> tuple[int, ...]:
        """shape of the eventual result, known ahead of time"""
        return self._shape

    @property
    def ndim(self) -> int:
        """number of dimensions, derived from ``shape``"""
        return len(self.shape)

    @property
    def dtype(self) -> str:
        """dtype of the eventual result"""
        return self._dtype

    def __getitem__(self, item) -> "ArrayProtocol":
        # block until the producer sets a result, then index it
        return self.result(self._timeout)[item]

    def __array__(self) -> "ArrayProtocol":
        # block until the producer sets a result, then return it
        return self.result(self._timeout)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # refuse numpy ufunc dispatch — callers must resolve the future first
        raise NotImplementedError

    def __array_function__(self, func, types, args, kwargs):
        # NEP 18 passes args/kwargs positionally (a tuple and a dict), not
        # splatted; refuse dispatch — callers must resolve the future first
        raise NotImplementedError


# inspired by https://www.dabeaz.com/coroutines/
def start_coroutine(func):
    """
    Starts coroutines for async arrays wrapped by NDProcessor.
    Used by all NDGraphic.set_indices and NDGraphic._create_graphic.

    Immediately runs the wrapped coroutine to completion unless ``block=False``
    is passed. It handles all the triage of possible sync vs. async
    (Future-like) objects.

    ``block=False`` is only used by ReferenceIndex._render_indices, which loops
    through setting all indices and then collects and sends the results back
    down to NDProcessor.get().
    """

    def start(
        self, *args, **kwargs
    ) -> tuple[Generator, ArrayProtocol | CudaArrayProtocol | FutureProtocol] | None:
        cr = func(self, *args, **kwargs)
        try:
            # begin coroutine; the first `yield` hands back the object to resolve
            to_resolve: FutureProtocol | ArrayProtocol | CudaArrayProtocol = cr.send(
                None
            )
        except StopIteration:
            # NDProcessor.get() has no `yield` expression, not async, nothing to return
            return None

        # NOTE(review): block/timeout are read from kwargs but also forwarded to
        # the wrapped coroutine above — confirm wrapped funcs accept them
        block = kwargs.get("block", True)
        timeout = kwargs.get("timeout", 1.0)

        if block:  # resolve Future immediately
            try:
                if isinstance(to_resolve, FutureProtocol):
                    # array is async, resolve future and send
                    cr.send(to_resolve.result(timeout=timeout))
                elif isinstance(to_resolve, CudaArrayProtocol):
                    # array lives on the GPU; convert it to a numpy array on the CPU
                    cr.send(cuda_to_numpy(to_resolve))
                else:
                    # not async, just send the array
                    cr.send(to_resolve)
            except StopIteration:
                pass

        else:  # no block, probably resolving multiple futures simultaneously
            if isinstance(to_resolve, FutureProtocol):
                # data is async, return coroutine generator and future
                # ReferenceIndex._render_indices() will manage them and wait to gather all futures
                return cr, to_resolve
            elif isinstance(to_resolve, CudaArrayProtocol):
                # a GPU array rather than a Future, but still deferred;
                # ReferenceIndex._render_indices will manage it
                return cr, to_resolve
            else:
                # not async, just send the array
                try:
                    cr.send(to_resolve)
                except (
                    StopIteration
                ):  # has to be here because of the yield expression, i.e. it's a generator
                    pass

    return start
Loading